Diffstat (limited to 'sys/net')
-rw-r--r--   sys/net/bpf.c   1320
-rw-r--r--   sys/net/bpf.h   242
-rw-r--r--   sys/net/bpf_compat.h   54
-rw-r--r--   sys/net/bpf_filter.c   560
-rw-r--r--   sys/net/bpfdesc.h   104
-rw-r--r--   sys/net/bridge.c   655
-rw-r--r--   sys/net/bridge.h   142
-rw-r--r--   sys/net/bsd_comp.c   1113
-rw-r--r--   sys/net/ethernet.h   98
-rw-r--r--   sys/net/fddi.h   87
-rw-r--r--   sys/net/hostcache.c   249
-rw-r--r--   sys/net/hostcache.h   95
-rw-r--r--   sys/net/if.c   1300
-rw-r--r--   sys/net/if.h   274
-rw-r--r--   sys/net/if_arp.h   113
-rw-r--r--   sys/net/if_atm.h   112
-rw-r--r--   sys/net/if_atmsubr.c   350
-rw-r--r--   sys/net/if_disc.c   203
-rw-r--r--   sys/net/if_dl.h   88
-rw-r--r--   sys/net/if_ef.c   595
-rw-r--r--   sys/net/if_ethersubr.c   1308
-rw-r--r--   sys/net/if_faith.c   97
-rw-r--r--   sys/net/if_fddisubr.c   730
-rw-r--r--   sys/net/if_gif.c   468
-rw-r--r--   sys/net/if_gif.h   68
-rw-r--r--   sys/net/if_iso88025subr.c   406
-rw-r--r--   sys/net/if_llc.h   145
-rw-r--r--   sys/net/if_loop.c   401
-rw-r--r--   sys/net/if_media.c   26
-rw-r--r--   sys/net/if_media.h   65
-rw-r--r--   sys/net/if_mib.c   149
-rw-r--r--   sys/net/if_mib.h   170
-rw-r--r--   sys/net/if_ppp.c   1576
-rw-r--r--   sys/net/if_ppp.h   137
-rw-r--r--   sys/net/if_pppvar.h   111
-rw-r--r--   sys/net/if_sl.c   1040
-rw-r--r--   sys/net/if_slvar.h   84
-rw-r--r--   sys/net/if_sppp.h   175
-rw-r--r--   sys/net/if_spppsubr.c   4262
-rw-r--r--   sys/net/if_tun.c   690
-rw-r--r--   sys/net/if_tun.h   45
-rw-r--r--   sys/net/if_tunvar.h   52
-rw-r--r--   sys/net/if_types.h   103
-rw-r--r--   sys/net/if_var.h   370
-rw-r--r--   sys/net/if_vlan.c   557
-rw-r--r--   sys/net/if_vlan_var.h   92
-rw-r--r--   sys/net/iso88025.h   112
-rw-r--r--   sys/net/net_osdep.c   58
-rw-r--r--   sys/net/net_osdep.h   121
-rw-r--r--   sys/net/netisr.h   95
-rw-r--r--   sys/net/pfkeyv2.h   420
-rw-r--r--   sys/net/ppp_comp.h   165
-rw-r--r--   sys/net/ppp_deflate.c   680
-rw-r--r--   sys/net/ppp_defs.h   155
-rw-r--r--   sys/net/ppp_tty.c   1128
-rw-r--r--   sys/net/radix.c   1028
-rw-r--r--   sys/net/radix.h   170
-rw-r--r--   sys/net/raw_cb.c   145
-rw-r--r--   sys/net/raw_cb.h   76
-rw-r--r--   sys/net/raw_usrreq.c   300
-rw-r--r--   sys/net/route.c   1080
-rw-r--r--   sys/net/route.h   297
-rw-r--r--   sys/net/rtsock.c   1002
-rw-r--r--   sys/net/slcompress.c   614
-rw-r--r--   sys/net/slcompress.h   162
-rw-r--r--   sys/net/slip.h   62
-rw-r--r--   sys/net/zlib.c   5379
-rw-r--r--   sys/net/zlib.h   1013
68 files changed, 35328 insertions, 15 deletions
diff --git a/sys/net/bpf.c b/sys/net/bpf.c
new file mode 100644
index 0000000..3ac4283
--- /dev/null
+++ b/sys/net/bpf.c
@@ -0,0 +1,1320 @@
+/*
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.c 8.2 (Berkeley) 3/28/94
+ *
+ * $FreeBSD$
+ */
+
+#include "bpf.h"
+
+#ifndef __GNUC__
+#define inline
+#else
+#define inline __inline
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/time.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/filio.h>
+#include <sys/sockio.h>
+#include <sys/ttycom.h>
+#include <sys/filedesc.h>
+
+#if defined(sparc) && BSD < 199103
+#include <sys/stream.h>
+#endif
+#include <sys/poll.h>
+
+#include <sys/socket.h>
+#include <sys/vnode.h>
+
+#include <net/if.h>
+#include <net/bpf.h>
+#include <net/bpfdesc.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
+
+#if NBPF > 0
+
+/*
+ * Older BSDs don't have kernel malloc.
+ */
+#if BSD < 199103
+extern bcopy();
+static caddr_t bpf_alloc();
+#include <net/bpf_compat.h>
+#define BPF_BUFSIZE (MCLBYTES-8)
+#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
+#else
+#define BPF_BUFSIZE 4096
+#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
+#endif
+
+#define PRINET 26 /* interruptible */
+
+/*
+ * The default read buffer size is patchable.
+ */
+static int bpf_bufsize = BPF_BUFSIZE;
+SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
+ &bpf_bufsize, 0, "");
+
+/*
+ * bpf_iflist is the list of interfaces; each corresponds to an ifnet
+ */
+static struct bpf_if *bpf_iflist;
+
+static int bpf_allocbufs __P((struct bpf_d *));
+static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
+static void bpf_detachd __P((struct bpf_d *d));
+static void bpf_freed __P((struct bpf_d *));
+static void bpf_mcopy __P((const void *, void *, size_t));
+static int bpf_movein __P((struct uio *, int,
+ struct mbuf **, struct sockaddr *, int *));
+static int bpf_setif __P((struct bpf_d *, struct ifreq *));
+static inline void
+ bpf_wakeup __P((struct bpf_d *));
+static void catchpacket __P((struct bpf_d *, u_char *, u_int,
+ u_int, void (*)(const void *, void *, size_t)));
+static void reset_d __P((struct bpf_d *));
+static int bpf_setf __P((struct bpf_d *, struct bpf_program *));
+
+static d_open_t bpfopen;
+static d_close_t bpfclose;
+static d_read_t bpfread;
+static d_write_t bpfwrite;
+static d_ioctl_t bpfioctl;
+static d_poll_t bpfpoll;
+
+#define CDEV_MAJOR 23
+static struct cdevsw bpf_cdevsw = {
+ /* open */ bpfopen,
+ /* close */ bpfclose,
+ /* read */ bpfread,
+ /* write */ bpfwrite,
+ /* ioctl */ bpfioctl,
+ /* poll */ bpfpoll,
+ /* mmap */ nommap,
+ /* strategy */ nostrategy,
+ /* name */ "bpf",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ 0,
+ /* bmaj */ -1
+};
+
+
+static int
+bpf_movein(uio, linktype, mp, sockp, datlen)
+ register struct uio *uio;
+ int linktype, *datlen;
+ register struct mbuf **mp;
+ register struct sockaddr *sockp;
+{
+ struct mbuf *m;
+ int error;
+ int len;
+ int hlen;
+
+ /*
+ * Build a sockaddr based on the data link layer type.
+ * We do this at this level because the ethernet header
+ * is copied directly into the data field of the sockaddr.
+ * In the case of SLIP, there is no header and the packet
+ * is forwarded as is.
+ * Also, we are careful to leave room at the front of the mbuf
+ * for the link level header.
+ */
+ switch (linktype) {
+
+ case DLT_SLIP:
+ sockp->sa_family = AF_INET;
+ hlen = 0;
+ break;
+
+ case DLT_EN10MB:
+ sockp->sa_family = AF_UNSPEC;
+ /* XXX Would MAXLINKHDR be better? */
+ hlen = sizeof(struct ether_header);
+ break;
+
+ case DLT_FDDI:
+#if defined(__FreeBSD__) || defined(__bsdi__)
+ sockp->sa_family = AF_IMPLINK;
+ hlen = 0;
+#else
+ sockp->sa_family = AF_UNSPEC;
+ /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
+ hlen = 24;
+#endif
+ break;
+
+ case DLT_RAW:
+ case DLT_NULL:
+ sockp->sa_family = AF_UNSPEC;
+ hlen = 0;
+ break;
+
+#ifdef __FreeBSD__
+ case DLT_ATM_RFC1483:
+ /*
+ * en atm driver requires 4-byte atm pseudo header.
+ * though it isn't standard, vpi:vci needs to be
+ * specified anyway.
+ */
+ sockp->sa_family = AF_UNSPEC;
+ hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
+ break;
+#endif
+
+ default:
+ return (EIO);
+ }
+
+ len = uio->uio_resid;
+ *datlen = len - hlen;
+ if ((unsigned)len > MCLBYTES)
+ return (EIO);
+
+ MGETHDR(m, M_WAIT, MT_DATA);
+ if (m == 0)
+ return (ENOBUFS);
+ if (len > MHLEN) {
+#if BSD >= 199103
+ MCLGET(m, M_WAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+#else
+ MCLGET(m);
+ if (m->m_len != MCLBYTES) {
+#endif
+ error = ENOBUFS;
+ goto bad;
+ }
+ }
+ m->m_pkthdr.len = m->m_len = len;
+ m->m_pkthdr.rcvif = NULL;
+ *mp = m;
+ /*
+ * Make room for link header.
+ */
+ if (hlen != 0) {
+ m->m_pkthdr.len -= hlen;
+ m->m_len -= hlen;
+#if BSD >= 199103
+ m->m_data += hlen; /* XXX */
+#else
+ m->m_off += hlen;
+#endif
+ error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
+ if (error)
+ goto bad;
+ }
+ error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
+ if (!error)
+ return (0);
+ bad:
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Attach file to the bpf interface, i.e. make d listen on bp.
+ * Must be called at splimp.
+ */
+static void
+bpf_attachd(d, bp)
+ struct bpf_d *d;
+ struct bpf_if *bp;
+{
+ /*
+ * Point d at bp, and add d to the interface's list of listeners.
+ * Finally, point the driver's bpf cookie at the interface so
+ * it will divert packets to bpf.
+ */
+ d->bd_bif = bp;
+ d->bd_next = bp->bif_dlist;
+ bp->bif_dlist = d;
+
+ bp->bif_ifp->if_bpf = bp;
+}
+
+/*
+ * Detach a file from its interface.
+ */
+static void
+bpf_detachd(d)
+ struct bpf_d *d;
+{
+ struct bpf_d **p;
+ struct bpf_if *bp;
+
+ bp = d->bd_bif;
+ /*
+ * Check if this descriptor had requested promiscuous mode.
+ * If so, turn it off.
+ */
+ if (d->bd_promisc) {
+ d->bd_promisc = 0;
+ if (ifpromisc(bp->bif_ifp, 0))
+ /*
+ * Something is really wrong if we were able to put
+ * the driver into promiscuous mode, but can't
+ * take it out.
+ */
+ panic("bpf: ifpromisc failed");
+ }
+ /* Remove d from the interface's descriptor list. */
+ p = &bp->bif_dlist;
+ while (*p != d) {
+ p = &(*p)->bd_next;
+ if (*p == 0)
+ panic("bpf_detachd: descriptor not in list");
+ }
+ *p = (*p)->bd_next;
+ if (bp->bif_dlist == 0)
+ /*
+ * Let the driver know that there are no more listeners.
+ */
+ d->bd_bif->bif_ifp->if_bpf = 0;
+ d->bd_bif = 0;
+}
+
+/*
+ * Open ethernet device. Returns ENXIO for illegal minor device number,
+ * EBUSY if file is open by another process.
+ */
+/* ARGSUSED */
+static int
+bpfopen(dev, flags, fmt, p)
+ dev_t dev;
+ int flags;
+ int fmt;
+ struct proc *p;
+{
+ register struct bpf_d *d;
+
+ if (p->p_prison)
+ return (EPERM);
+
+ d = dev->si_drv1;
+ /*
+ * Each minor can be opened by only one process. If the requested
+ * minor is in use, return EBUSY.
+ */
+ if (d)
+ return (EBUSY);
+ make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
+ MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
+ bzero(d, sizeof(*d));
+ dev->si_drv1 = d;
+ d->bd_bufsize = bpf_bufsize;
+ d->bd_sig = SIGIO;
+
+ return (0);
+}
+
+/*
+ * Close the descriptor by detaching it from its interface,
+ * deallocating its buffers, and marking it free.
+ */
+/* ARGSUSED */
+static int
+bpfclose(dev, flags, fmt, p)
+ dev_t dev;
+ int flags;
+ int fmt;
+ struct proc *p;
+{
+ register struct bpf_d *d = dev->si_drv1;
+ register int s;
+
+ funsetown(d->bd_sigio);
+ s = splimp();
+ if (d->bd_bif)
+ bpf_detachd(d);
+ splx(s);
+ bpf_freed(d);
+ dev->si_drv1 = 0;
+ FREE(d, M_BPF);
+
+ return (0);
+}
+
+/*
+ * Support for SunOS, which does not have tsleep.
+ */
+#if BSD < 199103
+static
+bpf_timeout(arg)
+ caddr_t arg;
+{
+ struct bpf_d *d = (struct bpf_d *)arg;
+ d->bd_timedout = 1;
+ wakeup(arg);
+}
+
+#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)
+
+int
+bpf_sleep(d)
+ register struct bpf_d *d;
+{
+ register int rto = d->bd_rtout;
+ register int st;
+
+ if (rto != 0) {
+ d->bd_timedout = 0;
+ timeout(bpf_timeout, (caddr_t)d, rto);
+ }
+ st = sleep((caddr_t)d, PRINET|PCATCH);
+ if (rto != 0) {
+ if (d->bd_timedout == 0)
+ untimeout(bpf_timeout, (caddr_t)d);
+ else if (st == 0)
+ return EWOULDBLOCK;
+ }
+ return (st != 0) ? EINTR : 0;
+}
+#else
+#define BPF_SLEEP tsleep
+#endif
+
+/*
+ * Rotate the packet buffers in descriptor d. Move the store buffer
+ * into the hold slot, and the free buffer into the store slot.
+ * Zero the length of the new store buffer.
+ */
+#define ROTATE_BUFFERS(d) \
+ (d)->bd_hbuf = (d)->bd_sbuf; \
+ (d)->bd_hlen = (d)->bd_slen; \
+ (d)->bd_sbuf = (d)->bd_fbuf; \
+ (d)->bd_slen = 0; \
+ (d)->bd_fbuf = 0;
+/*
+ * bpfread - read next chunk of packets from buffers
+ */
+static int
+bpfread(dev, uio, ioflag)
+ dev_t dev;
+ register struct uio *uio;
+ int ioflag;
+{
+ register struct bpf_d *d = dev->si_drv1;
+ int error;
+ int s;
+
+ /*
+ * Restrict application to use a buffer the same size as
+ * the kernel buffers.
+ */
+ if (uio->uio_resid != d->bd_bufsize)
+ return (EINVAL);
+
+ s = splimp();
+ /*
+ * If the hold buffer is empty, then do a timed sleep, which
+ * ends when the timeout expires or when enough packets
+ * have arrived to fill the store buffer.
+ */
+ while (d->bd_hbuf == 0) {
+ if (d->bd_immediate && d->bd_slen != 0) {
+ /*
+ * A packet(s) either arrived since the previous
+ * read or arrived while we were asleep.
+ * Rotate the buffers and return what's here.
+ */
+ ROTATE_BUFFERS(d);
+ break;
+ }
+ if (ioflag & IO_NDELAY)
+ error = EWOULDBLOCK;
+ else
+ error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
+ d->bd_rtout);
+ if (error == EINTR || error == ERESTART) {
+ splx(s);
+ return (error);
+ }
+ if (error == EWOULDBLOCK) {
+ /*
+ * On a timeout, return what's in the buffer,
+ * which may be nothing. If there is something
+ * in the store buffer, we can rotate the buffers.
+ */
+ if (d->bd_hbuf)
+ /*
+ * We filled up the buffer in between
+ * getting the timeout and arriving
+ * here, so we don't need to rotate.
+ */
+ break;
+
+ if (d->bd_slen == 0) {
+ splx(s);
+ return (0);
+ }
+ ROTATE_BUFFERS(d);
+ break;
+ }
+ }
+ /*
+ * At this point, we know we have something in the hold slot.
+ */
+ splx(s);
+
+ /*
+ * Move data from hold buffer into user space.
+ * We know the entire buffer is transferred since
+ * we checked above that the read buffer is bpf_bufsize bytes.
+ */
+ error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
+
+ s = splimp();
+ d->bd_fbuf = d->bd_hbuf;
+ d->bd_hbuf = 0;
+ d->bd_hlen = 0;
+ splx(s);
+
+ return (error);
+}
+
+
+/*
+ * If there are processes sleeping on this descriptor, wake them up.
+ */
+static inline void
+bpf_wakeup(d)
+ register struct bpf_d *d;
+{
+ wakeup((caddr_t)d);
+ if (d->bd_async && d->bd_sig && d->bd_sigio)
+ pgsigio(d->bd_sigio, d->bd_sig, 0);
+
+#if BSD >= 199103
+ selwakeup(&d->bd_sel);
+ /* XXX */
+ d->bd_sel.si_pid = 0;
+#else
+ if (d->bd_selproc) {
+ selwakeup(d->bd_selproc, (int)d->bd_selcoll);
+ d->bd_selcoll = 0;
+ d->bd_selproc = 0;
+ }
+#endif
+}
+
+static int
+bpfwrite(dev, uio, ioflag)
+ dev_t dev;
+ struct uio *uio;
+ int ioflag;
+{
+ register struct bpf_d *d = dev->si_drv1;
+ struct ifnet *ifp;
+ struct mbuf *m;
+ int error, s;
+ static struct sockaddr dst;
+ int datlen;
+
+ if (d->bd_bif == 0)
+ return (ENXIO);
+
+ ifp = d->bd_bif->bif_ifp;
+
+ if (uio->uio_resid == 0)
+ return (0);
+
+ error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
+ if (error)
+ return (error);
+
+ if (datlen > ifp->if_mtu)
+ return (EMSGSIZE);
+
+ if (d->bd_hdrcmplt)
+ dst.sa_family = pseudo_AF_HDRCMPLT;
+
+ s = splnet();
+#if BSD >= 199103
+ error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
+#else
+ error = (*ifp->if_output)(ifp, m, &dst);
+#endif
+ splx(s);
+ /*
+ * The driver frees the mbuf.
+ */
+ return (error);
+}
+
+/*
+ * Reset a descriptor by flushing its packet buffer and clearing the
+ * receive and drop counts. Should be called at splimp.
+ */
+static void
+reset_d(d)
+ struct bpf_d *d;
+{
+ if (d->bd_hbuf) {
+ /* Free the hold buffer. */
+ d->bd_fbuf = d->bd_hbuf;
+ d->bd_hbuf = 0;
+ }
+ d->bd_slen = 0;
+ d->bd_hlen = 0;
+ d->bd_rcount = 0;
+ d->bd_dcount = 0;
+}
+
+/*
+ * FIONREAD Check for read packet available.
+ * SIOCGIFADDR Get interface address - convenient hook to driver.
+ * BIOCGBLEN Get buffer len [for read()].
+ * BIOCSETF Set ethernet read filter.
+ * BIOCFLUSH Flush read packet buffer.
+ * BIOCPROMISC Put interface into promiscuous mode.
+ * BIOCGDLT Get link layer type.
+ * BIOCGETIF Get interface name.
+ * BIOCSETIF Set interface.
+ * BIOCSRTIMEOUT Set read timeout.
+ * BIOCGRTIMEOUT Get read timeout.
+ * BIOCGSTATS Get packet stats.
+ * BIOCIMMEDIATE Set immediate mode.
+ * BIOCVERSION Get filter language version.
+ * BIOCGHDRCMPLT Get "header already complete" flag
+ * BIOCSHDRCMPLT Set "header already complete" flag
+ */
+/* ARGSUSED */
+static int
+bpfioctl(dev, cmd, addr, flags, p)
+ dev_t dev;
+ u_long cmd;
+ caddr_t addr;
+ int flags;
+ struct proc *p;
+{
+ register struct bpf_d *d = dev->si_drv1;
+ int s, error = 0;
+
+ switch (cmd) {
+
+ default:
+ error = EINVAL;
+ break;
+
+ /*
+ * Check for read packet available.
+ */
+ case FIONREAD:
+ {
+ int n;
+
+ s = splimp();
+ n = d->bd_slen;
+ if (d->bd_hbuf)
+ n += d->bd_hlen;
+ splx(s);
+
+ *(int *)addr = n;
+ break;
+ }
+
+ case SIOCGIFADDR:
+ {
+ struct ifnet *ifp;
+
+ if (d->bd_bif == 0)
+ error = EINVAL;
+ else {
+ ifp = d->bd_bif->bif_ifp;
+ error = (*ifp->if_ioctl)(ifp, cmd, addr);
+ }
+ break;
+ }
+
+ /*
+ * Get buffer len [for read()].
+ */
+ case BIOCGBLEN:
+ *(u_int *)addr = d->bd_bufsize;
+ break;
+
+ /*
+ * Set buffer length.
+ */
+ case BIOCSBLEN:
+#if BSD < 199103
+ error = EINVAL;
+#else
+ if (d->bd_bif != 0)
+ error = EINVAL;
+ else {
+ register u_int size = *(u_int *)addr;
+
+ if (size > BPF_MAXBUFSIZE)
+ *(u_int *)addr = size = BPF_MAXBUFSIZE;
+ else if (size < BPF_MINBUFSIZE)
+ *(u_int *)addr = size = BPF_MINBUFSIZE;
+ d->bd_bufsize = size;
+ }
+#endif
+ break;
+
+ /*
+ * Set link layer read filter.
+ */
+ case BIOCSETF:
+ error = bpf_setf(d, (struct bpf_program *)addr);
+ break;
+
+ /*
+ * Flush read packet buffer.
+ */
+ case BIOCFLUSH:
+ s = splimp();
+ reset_d(d);
+ splx(s);
+ break;
+
+ /*
+ * Put interface into promiscuous mode.
+ */
+ case BIOCPROMISC:
+ if (d->bd_bif == 0) {
+ /*
+ * No interface attached yet.
+ */
+ error = EINVAL;
+ break;
+ }
+ s = splimp();
+ if (d->bd_promisc == 0) {
+ error = ifpromisc(d->bd_bif->bif_ifp, 1);
+ if (error == 0)
+ d->bd_promisc = 1;
+ }
+ splx(s);
+ break;
+
+ /*
+ * Get device parameters.
+ */
+ case BIOCGDLT:
+ if (d->bd_bif == 0)
+ error = EINVAL;
+ else
+ *(u_int *)addr = d->bd_bif->bif_dlt;
+ break;
+
+ /*
+ * Get interface name.
+ */
+ case BIOCGETIF:
+ if (d->bd_bif == 0)
+ error = EINVAL;
+ else {
+ struct ifnet *const ifp = d->bd_bif->bif_ifp;
+ struct ifreq *const ifr = (struct ifreq *)addr;
+
+ snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
+ "%s%d", ifp->if_name, ifp->if_unit);
+ }
+ break;
+
+ /*
+ * Set interface.
+ */
+ case BIOCSETIF:
+ error = bpf_setif(d, (struct ifreq *)addr);
+ break;
+
+ /*
+ * Set read timeout.
+ */
+ case BIOCSRTIMEOUT:
+ {
+ struct timeval *tv = (struct timeval *)addr;
+
+ /*
+ * Subtract 1 tick from tvtohz() since this isn't
+ * a one-shot timer.
+ */
+ if ((error = itimerfix(tv)) == 0)
+ d->bd_rtout = tvtohz(tv) - 1;
+ break;
+ }
+
+ /*
+ * Get read timeout.
+ */
+ case BIOCGRTIMEOUT:
+ {
+ struct timeval *tv = (struct timeval *)addr;
+
+ tv->tv_sec = d->bd_rtout / hz;
+ tv->tv_usec = (d->bd_rtout % hz) * tick;
+ break;
+ }
+
+ /*
+ * Get packet stats.
+ */
+ case BIOCGSTATS:
+ {
+ struct bpf_stat *bs = (struct bpf_stat *)addr;
+
+ bs->bs_recv = d->bd_rcount;
+ bs->bs_drop = d->bd_dcount;
+ break;
+ }
+
+ /*
+ * Set immediate mode.
+ */
+ case BIOCIMMEDIATE:
+ d->bd_immediate = *(u_int *)addr;
+ break;
+
+ case BIOCVERSION:
+ {
+ struct bpf_version *bv = (struct bpf_version *)addr;
+
+ bv->bv_major = BPF_MAJOR_VERSION;
+ bv->bv_minor = BPF_MINOR_VERSION;
+ break;
+ }
+
+ /*
+ * Get "header already complete" flag
+ */
+ case BIOCGHDRCMPLT:
+ *(u_int *)addr = d->bd_hdrcmplt;
+ break;
+
+ /*
+ * Set "header already complete" flag
+ */
+ case BIOCSHDRCMPLT:
+ d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
+ break;
+
+ case FIONBIO: /* Non-blocking I/O */
+ break;
+
+ case FIOASYNC: /* Send signal on receive packets */
+ d->bd_async = *(int *)addr;
+ break;
+
+ case FIOSETOWN:
+ error = fsetown(*(int *)addr, &d->bd_sigio);
+ break;
+
+ case FIOGETOWN:
+ *(int *)addr = fgetown(d->bd_sigio);
+ break;
+
+ /* This is deprecated, FIOSETOWN should be used instead. */
+ case TIOCSPGRP:
+ error = fsetown(-(*(int *)addr), &d->bd_sigio);
+ break;
+
+ /* This is deprecated, FIOGETOWN should be used instead. */
+ case TIOCGPGRP:
+ *(int *)addr = -fgetown(d->bd_sigio);
+ break;
+
+ case BIOCSRSIG: /* Set receive signal */
+ {
+ u_int sig;
+
+ sig = *(u_int *)addr;
+
+ if (sig >= NSIG)
+ error = EINVAL;
+ else
+ d->bd_sig = sig;
+ break;
+ }
+ case BIOCGRSIG:
+ *(u_int *)addr = d->bd_sig;
+ break;
+ }
+ return (error);
+}
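The ioctl set above maps directly onto the usual user-space capture setup. A minimal illustrative sketch follows (not from the patch itself); the device node /dev/bpf0 and the interface name "fxp0" are assumptions, and error handling is abbreviated.

#include <sys/types.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/bpf.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	u_int bufsize, on = 1;
	char *buf;
	ssize_t n;
	int fd;

	fd = open("/dev/bpf0", O_RDWR);			/* one process per minor */
	if (fd < 0)
		err(1, "open");

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "fxp0", sizeof(ifr.ifr_name));	/* hypothetical interface */
	if (ioctl(fd, BIOCSETIF, &ifr) < 0)		/* attach to the interface */
		err(1, "BIOCSETIF");
	if (ioctl(fd, BIOCIMMEDIATE, &on) < 0)		/* wake readers as packets arrive */
		err(1, "BIOCIMMEDIATE");
	if (ioctl(fd, BIOCGBLEN, &bufsize) < 0)		/* reads must use exactly this size */
		err(1, "BIOCGBLEN");

	if ((buf = malloc(bufsize)) == NULL)
		err(1, "malloc");
	n = read(fd, buf, bufsize);			/* blocks until the hold buffer is ready */
	if (n < 0)
		err(1, "read");
	printf("read %ld bytes of captured packets\n", (long)n);

	free(buf);
	close(fd);
	return (0);
}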
+
+/*
+ * Set d's packet filter program to fp. If this file already has a filter,
+ * free it and replace it. Returns EINVAL for bogus requests.
+ */
+static int
+bpf_setf(d, fp)
+ struct bpf_d *d;
+ struct bpf_program *fp;
+{
+ struct bpf_insn *fcode, *old;
+ u_int flen, size;
+ int s;
+
+ old = d->bd_filter;
+ if (fp->bf_insns == 0) {
+ if (fp->bf_len != 0)
+ return (EINVAL);
+ s = splimp();
+ d->bd_filter = 0;
+ reset_d(d);
+ splx(s);
+ if (old != 0)
+ free((caddr_t)old, M_BPF);
+ return (0);
+ }
+ flen = fp->bf_len;
+ if (flen > BPF_MAXINSNS)
+ return (EINVAL);
+
+ size = flen * sizeof(*fp->bf_insns);
+ fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
+ if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
+ bpf_validate(fcode, (int)flen)) {
+ s = splimp();
+ d->bd_filter = fcode;
+ reset_d(d);
+ splx(s);
+ if (old != 0)
+ free((caddr_t)old, M_BPF);
+
+ return (0);
+ }
+ free((caddr_t)fcode, M_BPF);
+ return (EINVAL);
+}
+
+/*
+ * Detach a file from its current interface (if attached at all) and attach
+ * to the interface indicated by the name stored in ifr.
+ * Return an errno or 0.
+ */
+static int
+bpf_setif(d, ifr)
+ struct bpf_d *d;
+ struct ifreq *ifr;
+{
+ struct bpf_if *bp;
+ int s, error;
+ struct ifnet *theywant;
+
+ theywant = ifunit(ifr->ifr_name);
+ if (theywant == 0)
+ return ENXIO;
+
+ /*
+ * Look through attached interfaces for the named one.
+ */
+ for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
+ struct ifnet *ifp = bp->bif_ifp;
+
+ if (ifp == 0 || ifp != theywant)
+ continue;
+ /*
+ * We found the requested interface.
+ * If it's not up, return an error.
+ * Allocate the packet buffers if we need to.
+ * If we're already attached to requested interface,
+ * just flush the buffer.
+ */
+ if ((ifp->if_flags & IFF_UP) == 0)
+ return (ENETDOWN);
+
+ if (d->bd_sbuf == 0) {
+ error = bpf_allocbufs(d);
+ if (error != 0)
+ return (error);
+ }
+ s = splimp();
+ if (bp != d->bd_bif) {
+ if (d->bd_bif)
+ /*
+ * Detach if attached to something else.
+ */
+ bpf_detachd(d);
+
+ bpf_attachd(d, bp);
+ }
+ reset_d(d);
+ splx(s);
+ return (0);
+ }
+ /* Not found. */
+ return (ENXIO);
+}
+
+/*
+ * Support for select() and poll() system calls
+ *
+ * Return true iff the specific operation will not block indefinitely.
+ * Otherwise, return false but make a note that a selwakeup() must be done.
+ */
+int
+bpfpoll(dev, events, p)
+ register dev_t dev;
+ int events;
+ struct proc *p;
+{
+ register struct bpf_d *d;
+ register int s;
+ int revents = 0;
+
+ /*
+ * An imitation of the FIONREAD ioctl code.
+ */
+ d = dev->si_drv1;
+
+ s = splimp();
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
+ revents |= events & (POLLIN | POLLRDNORM);
+ else
+ selrecord(p, &d->bd_sel);
+ }
+ splx(s);
+ return (revents);
+}
+
+/*
+ * Incoming linkage from device drivers. Process the packet pkt, of length
+ * pktlen, which is stored in a contiguous buffer. The packet is parsed
+ * by each process' filter, and if accepted, stashed into the corresponding
+ * buffer.
+ */
+void
+bpf_tap(ifp, pkt, pktlen)
+ struct ifnet *ifp;
+ register u_char *pkt;
+ register u_int pktlen;
+{
+ struct bpf_if *bp;
+ register struct bpf_d *d;
+ register u_int slen;
+ /*
+ * Note that the ipl does not have to be raised at this point.
+ * The only problem that could arise here is if two different
+ * interfaces shared any data. This is not the case.
+ */
+ bp = ifp->if_bpf;
+ for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
+ ++d->bd_rcount;
+ slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
+ if (slen != 0)
+ catchpacket(d, pkt, pktlen, slen, bcopy);
+ }
+}
+
+/*
+ * Copy data from an mbuf chain into a buffer. This code is derived
+ * from m_copydata in sys/uipc_mbuf.c.
+ */
+static void
+bpf_mcopy(src_arg, dst_arg, len)
+ const void *src_arg;
+ void *dst_arg;
+ register size_t len;
+{
+ register const struct mbuf *m;
+ register u_int count;
+ u_char *dst;
+
+ m = src_arg;
+ dst = dst_arg;
+ while (len > 0) {
+ if (m == 0)
+ panic("bpf_mcopy");
+ count = min(m->m_len, len);
+ bcopy(mtod(m, void *), dst, count);
+ m = m->m_next;
+ dst += count;
+ len -= count;
+ }
+}
+
+/*
+ * Incoming linkage from device drivers, when packet is in an mbuf chain.
+ */
+void
+bpf_mtap(ifp, m)
+ struct ifnet *ifp;
+ struct mbuf *m;
+{
+ struct bpf_if *bp = ifp->if_bpf;
+ struct bpf_d *d;
+ u_int pktlen, slen;
+ struct mbuf *m0;
+
+ pktlen = 0;
+ for (m0 = m; m0 != 0; m0 = m0->m_next)
+ pktlen += m0->m_len;
+
+ for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
+ ++d->bd_rcount;
+ slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
+ if (slen != 0)
+ catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
+ }
+}
+
+/*
+ * Move the packet data from interface memory (pkt) into the
+ * store buffer, waking up any pending read if the buffer fills. "cpfn"
+ * is the routine called to do the actual data
+ * transfer. bcopy is passed in to copy contiguous chunks, while
+ * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
+ * pkt is really an mbuf.
+ */
+static void
+catchpacket(d, pkt, pktlen, snaplen, cpfn)
+ register struct bpf_d *d;
+ register u_char *pkt;
+ register u_int pktlen, snaplen;
+ register void (*cpfn) __P((const void *, void *, size_t));
+{
+ register struct bpf_hdr *hp;
+ register int totlen, curlen;
+ register int hdrlen = d->bd_bif->bif_hdrlen;
+ /*
+ * Figure out how many bytes to move. If the packet is
+ * greater or equal to the snapshot length, transfer that
+ * much. Otherwise, transfer the whole packet (unless
+ * we hit the buffer size limit).
+ */
+ totlen = hdrlen + min(snaplen, pktlen);
+ if (totlen > d->bd_bufsize)
+ totlen = d->bd_bufsize;
+
+ /*
+ * Round up the end of the previous packet to the next longword.
+ */
+ curlen = BPF_WORDALIGN(d->bd_slen);
+ if (curlen + totlen > d->bd_bufsize) {
+ /*
+ * This packet will overflow the storage buffer.
+ * Rotate the buffers if we can, then wakeup any
+ * pending reads.
+ */
+ if (d->bd_fbuf == 0) {
+ /*
+ * We haven't completed the previous read yet,
+ * so drop the packet.
+ */
+ ++d->bd_dcount;
+ return;
+ }
+ ROTATE_BUFFERS(d);
+ bpf_wakeup(d);
+ curlen = 0;
+ }
+ else if (d->bd_immediate)
+ /*
+ * Immediate mode is set. A packet arrived so any
+ * reads should be woken up.
+ */
+ bpf_wakeup(d);
+
+ /*
+ * Append the bpf header.
+ */
+ hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
+#if BSD >= 199103
+ microtime(&hp->bh_tstamp);
+#elif defined(sun)
+ uniqtime(&hp->bh_tstamp);
+#else
+ hp->bh_tstamp = time;
+#endif
+ hp->bh_datalen = pktlen;
+ hp->bh_hdrlen = hdrlen;
+ /*
+ * Copy the packet data into the store buffer and update its length.
+ */
+ (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
+ d->bd_slen = curlen + totlen;
+}
+
+/*
+ * Initialize all nonzero fields of a descriptor.
+ */
+static int
+bpf_allocbufs(d)
+ register struct bpf_d *d;
+{
+ d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
+ if (d->bd_fbuf == 0)
+ return (ENOBUFS);
+
+ d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
+ if (d->bd_sbuf == 0) {
+ free(d->bd_fbuf, M_BPF);
+ return (ENOBUFS);
+ }
+ d->bd_slen = 0;
+ d->bd_hlen = 0;
+ return (0);
+}
+
+/*
+ * Free buffers currently in use by a descriptor.
+ * Called on close.
+ */
+static void
+bpf_freed(d)
+ register struct bpf_d *d;
+{
+ /*
+ * We don't need to lock out interrupts since this descriptor has
+ * been detached from its interface and it hasn't yet been marked
+ * free.
+ */
+ if (d->bd_sbuf != 0) {
+ free(d->bd_sbuf, M_BPF);
+ if (d->bd_hbuf != 0)
+ free(d->bd_hbuf, M_BPF);
+ if (d->bd_fbuf != 0)
+ free(d->bd_fbuf, M_BPF);
+ }
+ if (d->bd_filter)
+ free((caddr_t)d->bd_filter, M_BPF);
+}
+
+/*
+ * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
+ * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
+ * size of the link header (variable length headers not yet supported).
+ */
+void
+bpfattach(ifp, dlt, hdrlen)
+ struct ifnet *ifp;
+ u_int dlt, hdrlen;
+{
+ struct bpf_if *bp;
+ bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_DONTWAIT);
+ if (bp == 0)
+ panic("bpfattach");
+
+ bp->bif_dlist = 0;
+ bp->bif_ifp = ifp;
+ bp->bif_dlt = dlt;
+
+ bp->bif_next = bpf_iflist;
+ bpf_iflist = bp;
+
+ bp->bif_ifp->if_bpf = 0;
+
+ /*
+ * Compute the length of the bpf header. This is not necessarily
+ * equal to SIZEOF_BPF_HDR because we want to insert spacing such
+ * that the network layer header begins on a longword boundary (for
+ * performance reasons and to alleviate alignment restrictions).
+ */
+ bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
+
+ if (bootverbose)
+ printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
+}
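For reference, a hedged sketch of how a network driver would use these entry points; "xx" is a hypothetical driver name, and real drivers may reach these routines indirectly through ether_ifattach()/ether_input() instead.

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/bpf.h>

/* In the driver's attach routine, after the ifnet has been set up. */
static void
xx_bpf_attach(ifp)
	struct ifnet *ifp;
{
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
}

/* In the receive path, before the frame is handed to the upper layers. */
static void
xx_bpf_rx(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	if (ifp->if_bpf != NULL)	/* only when a listener is attached */
		bpf_mtap(ifp, m);
}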
+
+static void bpf_drvinit __P((void *unused));
+
+static void
+bpf_drvinit(unused)
+ void *unused;
+{
+
+ cdevsw_add(&bpf_cdevsw);
+}
+
+SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
+
+#else /* !BPF */
+/*
+ * NOP stubs to allow bpf-using drivers to load and function.
+ *
+ * A 'better' implementation would allow the core bpf functionality
+ * to be loaded at runtime.
+ */
+
+void
+bpf_tap(ifp, pkt, pktlen)
+ struct ifnet *ifp;
+ register u_char *pkt;
+ register u_int pktlen;
+{
+}
+
+void
+bpf_mtap(ifp, m)
+ struct ifnet *ifp;
+ struct mbuf *m;
+{
+}
+
+void
+bpfattach(ifp, dlt, hdrlen)
+ struct ifnet *ifp;
+ u_int dlt, hdrlen;
+{
+}
+
+u_int
+bpf_filter(pc, p, wirelen, buflen)
+ register const struct bpf_insn *pc;
+ register u_char *p;
+ u_int wirelen;
+ register u_int buflen;
+{
+ return -1; /* "no filter" behaviour */
+}
+
+#endif /* !BPF */
diff --git a/sys/net/bpf.h b/sys/net/bpf.h
new file mode 100644
index 0000000..d3ab193
--- /dev/null
+++ b/sys/net/bpf.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.h 8.1 (Berkeley) 6/10/93
+ * @(#)bpf.h 1.34 (LBL) 6/16/96
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPF_H_
+#define _NET_BPF_H_
+
+/* BSD style release date */
+#define BPF_RELEASE 199606
+
+typedef int32_t bpf_int32;
+typedef u_int32_t bpf_u_int32;
+
+/*
+ * Alignment macros. BPF_WORDALIGN rounds up to the next
+ * even multiple of BPF_ALIGNMENT.
+ */
+#define BPF_ALIGNMENT sizeof(long)
+#define BPF_WORDALIGN(x) (((x)+(BPF_ALIGNMENT-1))&~(BPF_ALIGNMENT-1))
+
+#define BPF_MAXINSNS 512
+#define BPF_MAXBUFSIZE 0x8000
+#define BPF_MINBUFSIZE 32
+
+/*
+ * Structure for BIOCSETF.
+ */
+struct bpf_program {
+ u_int bf_len;
+ struct bpf_insn *bf_insns;
+};
+
+/*
+ * Struct returned by BIOCGSTATS.
+ */
+struct bpf_stat {
+ u_int bs_recv; /* number of packets received */
+ u_int bs_drop; /* number of packets dropped */
+};
+
+/*
+ * Struct returned by BIOCVERSION. This represents the version number of
+ * the filter language described by the instruction encodings below.
+ * bpf understands a program iff kernel_major == filter_major &&
+ * kernel_minor >= filter_minor, that is, if the value returned by the
+ * running kernel has the same major number and a minor number equal
+ * to or less than the filter being downloaded. Otherwise, the
+ * results are undefined, meaning an error may be returned or packets
+ * may be accepted haphazardly.
+ * It has nothing to do with the source code version.
+ */
+struct bpf_version {
+ u_short bv_major;
+ u_short bv_minor;
+};
+/* Current version number of filter architecture. */
+#define BPF_MAJOR_VERSION 1
+#define BPF_MINOR_VERSION 1
+
+#define BIOCGBLEN _IOR('B',102, u_int)
+#define BIOCSBLEN _IOWR('B',102, u_int)
+#define BIOCSETF _IOW('B',103, struct bpf_program)
+#define BIOCFLUSH _IO('B',104)
+#define BIOCPROMISC _IO('B',105)
+#define BIOCGDLT _IOR('B',106, u_int)
+#define BIOCGETIF _IOR('B',107, struct ifreq)
+#define BIOCSETIF _IOW('B',108, struct ifreq)
+#define BIOCSRTIMEOUT _IOW('B',109, struct timeval)
+#define BIOCGRTIMEOUT _IOR('B',110, struct timeval)
+#define BIOCGSTATS _IOR('B',111, struct bpf_stat)
+#define BIOCIMMEDIATE _IOW('B',112, u_int)
+#define BIOCVERSION _IOR('B',113, struct bpf_version)
+#define BIOCGRSIG _IOR('B',114, u_int)
+#define BIOCSRSIG _IOW('B',115, u_int)
+#define BIOCGHDRCMPLT _IOR('B',116, u_int)
+#define BIOCSHDRCMPLT _IOW('B',117, u_int)
+
+/*
+ * Structure prepended to each packet.
+ */
+struct bpf_hdr {
+ struct timeval bh_tstamp; /* time stamp */
+ bpf_u_int32 bh_caplen; /* length of captured portion */
+ bpf_u_int32 bh_datalen; /* original length of packet */
+ u_short bh_hdrlen; /* length of bpf header (this struct
+ plus alignment padding) */
+};
+/*
+ * Because the structure above is not a multiple of 4 bytes, some compilers
+ * will insist on inserting padding; hence, sizeof(struct bpf_hdr) won't work.
+ * Only the kernel needs to know about it; applications use bh_hdrlen.
+ */
+#ifdef KERNEL
+#define SIZEOF_BPF_HDR (sizeof(struct bpf_hdr) <= 20 ? 18 : \
+ sizeof(struct bpf_hdr))
+#endif
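As the comment notes, applications step through a read(2) buffer using bh_hdrlen rather than sizeof(struct bpf_hdr). An illustrative user-space sketch (assuming buf and n came from a read on a bpf descriptor, as in the earlier ioctl example):

#include <sys/types.h>
#include <sys/time.h>

#include <net/bpf.h>

#include <stdio.h>

static void
dump_capture(char *buf, ssize_t n)
{
	char *p = buf;

	while (p < buf + n) {
		struct bpf_hdr *bh = (struct bpf_hdr *)p;
		u_char *pkt = (u_char *)p + bh->bh_hdrlen;	/* captured bytes start here */

		printf("caplen %u of %u bytes\n",
		    (unsigned)bh->bh_caplen, (unsigned)bh->bh_datalen);
		(void)pkt;
		/* bh_hdrlen already accounts for the alignment padding; step
		   to the next longword-aligned header. */
		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
	}
}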
+
+/*
+ * Data-link level type codes.
+ */
+#define DLT_NULL 0 /* no link-layer encapsulation */
+#define DLT_EN10MB 1 /* Ethernet (10Mb) */
+#define DLT_EN3MB 2 /* Experimental Ethernet (3Mb) */
+#define DLT_AX25 3 /* Amateur Radio AX.25 */
+#define DLT_PRONET 4 /* Proteon ProNET Token Ring */
+#define DLT_CHAOS 5 /* Chaos */
+#define DLT_IEEE802 6 /* IEEE 802 Networks */
+#define DLT_ARCNET 7 /* ARCNET */
+#define DLT_SLIP 8 /* Serial Line IP */
+#define DLT_PPP 9 /* Point-to-point Protocol */
+#define DLT_FDDI 10 /* FDDI */
+#define DLT_ATM_RFC1483 11 /* LLC/SNAP encapsulated atm */
+#define DLT_RAW 12 /* raw IP */
+#define DLT_SLIP_BSDOS 13 /* BSD/OS Serial Line IP */
+#define DLT_PPP_BSDOS 14 /* BSD/OS Point-to-point Protocol */
+
+/*
+ * The instruction encodings.
+ */
+/* instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD 0x00
+#define BPF_LDX 0x01
+#define BPF_ST 0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+#define BPF_MISC 0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code) ((code) & 0x18)
+#define BPF_W 0x00
+#define BPF_H 0x08
+#define BPF_B 0x10
+#define BPF_MODE(code) ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+
+/* alu/jmp fields */
+#define BPF_OP(code) ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define BPF_OR 0x40
+#define BPF_AND 0x50
+#define BPF_LSH 0x60
+#define BPF_RSH 0x70
+#define BPF_NEG 0x80
+#define BPF_JA 0x00
+#define BPF_JEQ 0x10
+#define BPF_JGT 0x20
+#define BPF_JGE 0x30
+#define BPF_JSET 0x40
+#define BPF_SRC(code) ((code) & 0x08)
+#define BPF_K 0x00
+#define BPF_X 0x08
+
+/* ret - BPF_K and BPF_X also apply */
+#define BPF_RVAL(code) ((code) & 0x18)
+#define BPF_A 0x10
+
+/* misc */
+#define BPF_MISCOP(code) ((code) & 0xf8)
+#define BPF_TAX 0x00
+#define BPF_TXA 0x80
+
+/*
+ * The instruction data structure.
+ */
+struct bpf_insn {
+ u_short code;
+ u_char jt;
+ u_char jf;
+ bpf_u_int32 k;
+};
+
+/*
+ * Macros for insn array initializers.
+ */
+#define BPF_STMT(code, k) { (u_short)(code), 0, 0, k }
+#define BPF_JUMP(code, k, jt, jf) { (u_short)(code), jt, jf, k }
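As an illustration of these macros, the classic filter that accepts IP-over-Ethernet frames and rejects everything else (offset 12 is the Ethernet type field, 0x0800 is ETHERTYPE_IP); the program would be installed with the BIOCSETF ioctl defined above.

#include <sys/types.h>
#include <net/bpf.h>

struct bpf_insn ip_insns[] = {
	BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),		/* A <- ethertype */
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0x0800, 0, 1),	/* IP? */
	BPF_STMT(BPF_RET+BPF_K, (u_int)-1),		/* yes: accept whole packet */
	BPF_STMT(BPF_RET+BPF_K, 0),			/* no: reject */
};

struct bpf_program ip_prog = {
	sizeof(ip_insns) / sizeof(ip_insns[0]),		/* bf_len */
	ip_insns,					/* bf_insns */
};

/* ioctl(fd, BIOCSETF, &ip_prog) loads it on an open bpf descriptor. */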
+
+#ifdef KERNEL
+int bpf_validate __P((const struct bpf_insn *, int));
+void bpf_tap __P((struct ifnet *, u_char *, u_int));
+void bpf_mtap __P((struct ifnet *, struct mbuf *));
+void bpfattach __P((struct ifnet *, u_int, u_int));
+void bpfilterattach __P((int));
+u_int bpf_filter __P((const struct bpf_insn *, u_char *, u_int, u_int));
+#endif
+
+/*
+ * Number of scratch memory words (for BPF_LD|BPF_MEM and BPF_ST).
+ */
+#define BPF_MEMWORDS 16
+
+#endif
diff --git a/sys/net/bpf_compat.h b/sys/net/bpf_compat.h
new file mode 100644
index 0000000..0936b84
--- /dev/null
+++ b/sys/net/bpf_compat.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf_compat.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPF_COMPAT_H_
+#define _NET_BPF_COMPAT_H_
+
+/*
+ * Some hacks for compatibility across SunOS and 4.4BSD. We emulate malloc
+ * and free with mbuf clusters. We store a pointer to the mbuf in the first
+ * word of the mbuf and return 8 bytes past the start of data (for double
+ * word alignment). We cannot just use offsets because clusters are not at
+ * a fixed offset from the associated mbuf. Sorry for this kludge.
+ */
+#define malloc(size, type, canwait) bpf_alloc(size, canwait)
+#define free(cp, type) m_free(*(struct mbuf **)(cp - 8))
+#define M_WAITOK M_WAIT
+
+/* This mapping works for our purposes. */
+#define ERESTART EINTR
+
+#endif
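A hedged sketch of the allocator these macros assume (the real bpf_alloc() is declared in bpf.c and may differ in detail): it follows the scheme described above, remembering the owning mbuf in the first word and handing out the data pointer 8 bytes in. The pre-4.4BSD single-argument MCLGET() is assumed.

#include <sys/param.h>
#include <sys/mbuf.h>

static caddr_t
bpf_alloc_sketch(size, canwait)
	register int size;
	register int canwait;
{
	register struct mbuf *m;

	if ((unsigned)size > (MCLBYTES - 8))
		return (0);
	MGET(m, canwait, MT_DATA);
	if (m == 0)
		return (0);
	if ((unsigned)size > (MLEN - 8)) {
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
			m_freem(m);
			return (0);
		}
	}
	*mtod(m, struct mbuf **) = m;		/* back pointer recovered by free() above */
	return (mtod(m, caddr_t) + 8);		/* 8 bytes in: double-word aligned */
}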
diff --git a/sys/net/bpf_filter.c b/sys/net/bpf_filter.c
new file mode 100644
index 0000000..0b2ed9e
--- /dev/null
+++ b/sys/net/bpf_filter.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf_filter.c 8.1 (Berkeley) 6/10/93
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+
+#ifdef sun
+#include <netinet/in.h>
+#endif
+
+#if defined(sparc) || defined(mips) || defined(ibm032) || defined(__alpha__)
+#define BPF_ALIGN
+#endif
+
+#ifndef BPF_ALIGN
+#define EXTRACT_SHORT(p) ((u_int16_t)ntohs(*(u_int16_t *)p))
+#define EXTRACT_LONG(p) (ntohl(*(u_int32_t *)p))
+#else
+#define EXTRACT_SHORT(p)\
+ ((u_int16_t)\
+ ((u_int16_t)*((u_char *)p+0)<<8|\
+ (u_int16_t)*((u_char *)p+1)<<0))
+#define EXTRACT_LONG(p)\
+ ((u_int32_t)*((u_char *)p+0)<<24|\
+ (u_int32_t)*((u_char *)p+1)<<16|\
+ (u_int32_t)*((u_char *)p+2)<<8|\
+ (u_int32_t)*((u_char *)p+3)<<0)
+#endif
+
+#ifdef KERNEL
+#include <sys/mbuf.h>
+#endif
+#include <net/bpf.h>
+#ifdef KERNEL
+#define MINDEX(m, k) \
+{ \
+ register int len = m->m_len; \
+ \
+ while (k >= len) { \
+ k -= len; \
+ m = m->m_next; \
+ if (m == 0) \
+ return 0; \
+ len = m->m_len; \
+ } \
+}
+
+static u_int16_t m_xhalf __P((struct mbuf *m, bpf_u_int32 k, int *err));
+static u_int32_t m_xword __P((struct mbuf *m, bpf_u_int32 k, int *err));
+
+static u_int32_t
+m_xword(m, k, err)
+ register struct mbuf *m;
+ register bpf_u_int32 k;
+ register int *err;
+{
+ register size_t len;
+ register u_char *cp, *np;
+ register struct mbuf *m0;
+
+ len = m->m_len;
+ while (k >= len) {
+ k -= len;
+ m = m->m_next;
+ if (m == 0)
+ goto bad;
+ len = m->m_len;
+ }
+ cp = mtod(m, u_char *) + k;
+ if (len - k >= 4) {
+ *err = 0;
+ return EXTRACT_LONG(cp);
+ }
+ m0 = m->m_next;
+ if (m0 == 0 || m0->m_len + len - k < 4)
+ goto bad;
+ *err = 0;
+ np = mtod(m0, u_char *);
+ switch (len - k) {
+
+ case 1:
+ return
+ ((u_int32_t)cp[0] << 24) |
+ ((u_int32_t)np[0] << 16) |
+ ((u_int32_t)np[1] << 8) |
+ (u_int32_t)np[2];
+
+ case 2:
+ return
+ ((u_int32_t)cp[0] << 24) |
+ ((u_int32_t)cp[1] << 16) |
+ ((u_int32_t)np[0] << 8) |
+ (u_int32_t)np[1];
+
+ default:
+ return
+ ((u_int32_t)cp[0] << 24) |
+ ((u_int32_t)cp[1] << 16) |
+ ((u_int32_t)cp[2] << 8) |
+ (u_int32_t)np[0];
+ }
+ bad:
+ *err = 1;
+ return 0;
+}
+
+static u_int16_t
+m_xhalf(m, k, err)
+ register struct mbuf *m;
+ register bpf_u_int32 k;
+ register int *err;
+{
+ register size_t len;
+ register u_char *cp;
+ register struct mbuf *m0;
+
+ len = m->m_len;
+ while (k >= len) {
+ k -= len;
+ m = m->m_next;
+ if (m == 0)
+ goto bad;
+ len = m->m_len;
+ }
+ cp = mtod(m, u_char *) + k;
+ if (len - k >= 2) {
+ *err = 0;
+ return EXTRACT_SHORT(cp);
+ }
+ m0 = m->m_next;
+ if (m0 == 0)
+ goto bad;
+ *err = 0;
+ return (cp[0] << 8) | mtod(m0, u_char *)[0];
+ bad:
+ *err = 1;
+ return 0;
+}
+#endif
+
+/*
+ * Execute the filter program starting at pc on the packet p
+ * wirelen is the length of the original packet
+ * buflen is the amount of data present
+ */
+u_int
+bpf_filter(pc, p, wirelen, buflen)
+ register const struct bpf_insn *pc;
+ register u_char *p;
+ u_int wirelen;
+ register u_int buflen;
+{
+ register u_int32_t A = 0, X = 0;
+ register bpf_u_int32 k;
+ int32_t mem[BPF_MEMWORDS];
+
+ if (pc == 0)
+ /*
+ * No filter means accept all.
+ */
+ return (u_int)-1;
+
+ --pc;
+ while (1) {
+ ++pc;
+ switch (pc->code) {
+
+ default:
+#ifdef KERNEL
+ return 0;
+#else
+ abort();
+#endif
+ case BPF_RET|BPF_K:
+ return (u_int)pc->k;
+
+ case BPF_RET|BPF_A:
+ return (u_int)A;
+
+ case BPF_LD|BPF_W|BPF_ABS:
+ k = pc->k;
+ if (k > buflen || sizeof(int32_t) > buflen - k) {
+#ifdef KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return 0;
+ A = m_xword((struct mbuf *)p, k, &merr);
+ if (merr != 0)
+ return 0;
+ continue;
+#else
+ return 0;
+#endif
+ }
+#ifdef BPF_ALIGN
+ if (((intptr_t)(p + k) & 3) != 0)
+ A = EXTRACT_LONG(&p[k]);
+ else
+#endif
+ A = ntohl(*(int32_t *)(p + k));
+ continue;
+
+ case BPF_LD|BPF_H|BPF_ABS:
+ k = pc->k;
+ if (k > buflen || sizeof(int16_t) > buflen - k) {
+#ifdef KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return 0;
+ A = m_xhalf((struct mbuf *)p, k, &merr);
+ continue;
+#else
+ return 0;
+#endif
+ }
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_ABS:
+ k = pc->k;
+ if (k >= buflen) {
+#ifdef KERNEL
+ register struct mbuf *m;
+
+ if (buflen != 0)
+ return 0;
+ m = (struct mbuf *)p;
+ MINDEX(m, k);
+ A = mtod(m, u_char *)[k];
+ continue;
+#else
+ return 0;
+#endif
+ }
+ A = p[k];
+ continue;
+
+ case BPF_LD|BPF_W|BPF_LEN:
+ A = wirelen;
+ continue;
+
+ case BPF_LDX|BPF_W|BPF_LEN:
+ X = wirelen;
+ continue;
+
+ case BPF_LD|BPF_W|BPF_IND:
+ k = X + pc->k;
+ if (pc->k > buflen || X > buflen - pc->k ||
+ sizeof(int32_t) > buflen - k) {
+#ifdef KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return 0;
+ A = m_xword((struct mbuf *)p, k, &merr);
+ if (merr != 0)
+ return 0;
+ continue;
+#else
+ return 0;
+#endif
+ }
+#ifdef BPF_ALIGN
+ if (((intptr_t)(p + k) & 3) != 0)
+ A = EXTRACT_LONG(&p[k]);
+ else
+#endif
+ A = ntohl(*(int32_t *)(p + k));
+ continue;
+
+ case BPF_LD|BPF_H|BPF_IND:
+ k = X + pc->k;
+ if (X > buflen || pc->k > buflen - X ||
+ sizeof(int16_t) > buflen - k) {
+#ifdef KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return 0;
+ A = m_xhalf((struct mbuf *)p, k, &merr);
+ if (merr != 0)
+ return 0;
+ continue;
+#else
+ return 0;
+#endif
+ }
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_IND:
+ k = X + pc->k;
+ if (pc->k >= buflen || X >= buflen - pc->k) {
+#ifdef KERNEL
+ register struct mbuf *m;
+
+ if (buflen != 0)
+ return 0;
+ m = (struct mbuf *)p;
+ MINDEX(m, k);
+ A = mtod(m, char *)[k];
+ continue;
+#else
+ return 0;
+#endif
+ }
+ A = p[k];
+ continue;
+
+ case BPF_LDX|BPF_MSH|BPF_B:
+ k = pc->k;
+ if (k >= buflen) {
+#ifdef KERNEL
+ register struct mbuf *m;
+
+ if (buflen != 0)
+ return 0;
+ m = (struct mbuf *)p;
+ MINDEX(m, k);
+ X = (mtod(m, char *)[k] & 0xf) << 2;
+ continue;
+#else
+ return 0;
+#endif
+ }
+ X = (p[pc->k] & 0xf) << 2;
+ continue;
+
+ case BPF_LD|BPF_IMM:
+ A = pc->k;
+ continue;
+
+ case BPF_LDX|BPF_IMM:
+ X = pc->k;
+ continue;
+
+ case BPF_LD|BPF_MEM:
+ A = mem[pc->k];
+ continue;
+
+ case BPF_LDX|BPF_MEM:
+ X = mem[pc->k];
+ continue;
+
+ case BPF_ST:
+ mem[pc->k] = A;
+ continue;
+
+ case BPF_STX:
+ mem[pc->k] = X;
+ continue;
+
+ case BPF_JMP|BPF_JA:
+ pc += pc->k;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_K:
+ pc += (A > pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_K:
+ pc += (A >= pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ pc += (A == pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_K:
+ pc += (A & pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_X:
+ pc += (A > X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_X:
+ pc += (A >= X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ pc += (A == X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_X:
+ pc += (A & X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_X:
+ if (X == 0)
+ return 0;
+ A /= X;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_K:
+ A += pc->k;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_K:
+ A -= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_K:
+ A *= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_K:
+ A /= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_K:
+ A &= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_K:
+ A |= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_K:
+ A <<= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_K:
+ A >>= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_NEG:
+ A = -A;
+ continue;
+
+ case BPF_MISC|BPF_TAX:
+ X = A;
+ continue;
+
+ case BPF_MISC|BPF_TXA:
+ A = X;
+ continue;
+ }
+ }
+}
+
+#ifdef KERNEL
+/*
+ * Return true if the 'fcode' is a valid filter program.
+ * The constraints are that each jump be forward and to a valid
+ * code. The code must terminate with either an accept or reject.
+ *
+ * The kernel needs to be able to verify an application's filter code.
+ * Otherwise, a bogus program could easily crash the system.
+ */
+int
+bpf_validate(f, len)
+ const struct bpf_insn *f;
+ int len;
+{
+ register int i;
+ register const struct bpf_insn *p;
+
+ for (i = 0; i < len; ++i) {
+ /*
+ * Check that the jumps are forward, and within
+ * the code block.
+ */
+ p = &f[i];
+ if (BPF_CLASS(p->code) == BPF_JMP) {
+ register int from = i + 1;
+
+ if (BPF_OP(p->code) == BPF_JA) {
+ if (from >= len || p->k >= len - from)
+ return 0;
+ }
+ else if (from >= len || p->jt >= len - from ||
+ p->jf >= len - from)
+ return 0;
+ }
+ /*
+ * Check that memory operations use valid addresses.
+ */
+ if ((BPF_CLASS(p->code) == BPF_ST ||
+ (BPF_CLASS(p->code) == BPF_LD &&
+ (p->code & 0xe0) == BPF_MEM)) &&
+ p->k >= BPF_MEMWORDS)
+ return 0;
+ /*
+ * Check for constant division by 0.
+ */
+ if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0)
+ return 0;
+ }
+ return BPF_CLASS(f[len - 1].code) == BPF_RET;
+}
+#endif
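For clarity on the wirelen/buflen convention above, a user-space sketch of calling bpf_filter() directly on a contiguous buffer (buflen equal to the amount of data present; buflen == 0 is the kernel-only signal that p is really an mbuf chain). The prototype is declared by hand because bpf.h only exposes it under KERNEL.

#include <sys/types.h>
#include <net/bpf.h>

#include <stdio.h>

u_int	bpf_filter(const struct bpf_insn *, u_char *, u_int, u_int);

int
main(void)
{
	/* Accept at most the first 64 bytes of every packet. */
	struct bpf_insn prog[] = {
		BPF_STMT(BPF_RET|BPF_K, 64),
	};
	u_char pkt[128] = { 0 };
	u_int snap;

	snap = bpf_filter(prog, pkt, sizeof(pkt), sizeof(pkt));
	printf("filter returned snaplen %u\n", snap);
	return (0);
}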
diff --git a/sys/net/bpfdesc.h b/sys/net/bpfdesc.h
new file mode 100644
index 0000000..1e638f7
--- /dev/null
+++ b/sys/net/bpfdesc.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpfdesc.h 8.1 (Berkeley) 6/10/93
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPFDESC_H_
+#define _NET_BPFDESC_H_
+
+#include <sys/select.h>
+
+/*
+ * Descriptor associated with each open bpf file.
+ */
+struct bpf_d {
+ struct bpf_d *bd_next; /* Linked list of descriptors */
+ /*
+ * Buffer slots: two mbuf clusters buffer the incoming packets.
+ * The model has three slots. Sbuf is always occupied.
+ * sbuf (store) - Receive interrupt puts packets here.
+ * hbuf (hold) - When sbuf is full, put cluster here and
+ * wakeup read (replace sbuf with fbuf).
+ * fbuf (free) - When read is done, put cluster here.
+ * On receiving, if sbuf is full and fbuf is 0, packet is dropped.
+ */
+ caddr_t bd_sbuf; /* store slot */
+ caddr_t bd_hbuf; /* hold slot */
+ caddr_t bd_fbuf; /* free slot */
+ int bd_slen; /* current length of store buffer */
+ int bd_hlen; /* current length of hold buffer */
+
+ int bd_bufsize; /* absolute length of buffers */
+
+ struct bpf_if * bd_bif; /* interface descriptor */
+ u_long bd_rtout; /* Read timeout in 'ticks' */
+ struct bpf_insn *bd_filter; /* filter code */
+ u_long bd_rcount; /* number of packets received */
+ u_long bd_dcount; /* number of packets dropped */
+
+ u_char bd_promisc; /* true if listening promiscuously */
+ u_char bd_state; /* idle, waiting, or timed out */
+ u_char bd_immediate; /* true to return on packet arrival */
+ int bd_hdrcmplt; /* false to fill in src lladdr automatically */
+ int bd_async; /* non-zero if packet reception should generate signal */
+ int bd_sig; /* signal to send upon packet reception */
+ struct sigio * bd_sigio; /* information for async I/O */
+#if BSD < 199103
+ u_char bd_selcoll; /* true if selects collide */
+ int bd_timedout;
+ struct proc * bd_selproc; /* process that last selected us */
+#else
+ u_char bd_pad; /* explicit alignment */
+ struct selinfo bd_sel; /* bsd select info */
+#endif
+};
+
+/*
+ * Descriptor associated with each attached hardware interface.
+ */
+struct bpf_if {
+ struct bpf_if *bif_next; /* list of all interfaces */
+ struct bpf_d *bif_dlist; /* descriptor list */
+ u_int bif_dlt; /* link layer type */
+ u_int bif_hdrlen; /* length of header (with padding) */
+ struct ifnet *bif_ifp; /* corresponding interface */
+};
+
+#endif
diff --git a/sys/net/bridge.c b/sys/net/bridge.c
new file mode 100644
index 0000000..eeb54ec
--- /dev/null
+++ b/sys/net/bridge.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (c) 1998 Luigi Rizzo
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * This code implements bridging in FreeBSD. It only acts on ethernet-type
+ * interfaces (others are still usable for routing).
+ * A bridging table holds the source MAC address/dest. interface for each
+ * known node. The table is indexed using a hash of the source address.
+ *
+ * Input packets are tapped near the end of the input routine in each
+ * driver (near the call to bpf_mtap, or before the call to ether_input)
+ * and analysed calling bridge_in(). Depending on the result, the packet
+ * can be forwarded to one or more output interfaces using bdg_forward(),
+ * and/or sent to the upper layer (e.g. in case of multicast).
+ *
+ * Output packets are intercepted near the end of ether_output(),
+ * the correct destination is selected calling bdg_dst_lookup(),
+ * and then forwarding is done using bdg_forward().
+ * Bridging is controlled by the sysctl variable net.link.ether.bridge
+ *
+ * The arp code is also modified to let a machine answer to requests
+ * irrespective of the port the request came from.
+ *
+ * In case of loops in the bridging topology, the bridge detects this
+ * event and temporarily mutes output bridging on one of the ports.
+ * Periodically, interfaces are unmuted by bdg_timeout(). (For the
+ * mute flag I am temporarily using IFF_LINK2, but this has to
+ * change.) Muting is only implemented as a safety measure, and also as
+ * a mechanism to support a user-space implementation of the spanning
+ * tree algorithm. In the final release, unmuting will only occur
+ * because of explicit action of the user-level daemon.
+ *
+ * To build a bridging kernel, use the following option
+ * option BRIDGE
+ * and then at runtime set the sysctl variable to enable bridging.
+ *
+ * Only one interface is supposed to have addresses set (but
+ * there are no problems in practice if you set addresses for more
+ * than one interface).
+ * Bridging will act before routing, but nothing prevents a machine
+ * from doing both (modulo bugs in the implementation...).
+ *
+ * THINGS TO REMEMBER
+ * - bridging requires some (small) modifications to the interface
+ * driver. Currently (980911) the "ed", "de", "tx", "lnc" drivers
+ * have been modified and tested. "fxp", "ep", "fe" have been
+ * modified but not tested. See the "ed" and "de" drivers as
+ * examples on how to operate.
+ * - bridging is incompatible with multicast routing on the same
+ *   machine. There is no easy fix for this.
+ * - loop detection is still not very robust.
+ * - the interface of bdg_forward() could be improved.
+ */
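+/*
+ * For example, on a kernel built with "option BRIDGE", bridging can be
+ * enabled at run time with (illustrative invocation; the sysctl name
+ * matches the declaration below):
+ *	sysctl -w net.link.ether.bridge=1
+ */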
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/socket.h> /* for net/if.h */
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+
+#include <netinet/in.h> /* for struct arpcom */
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h> /* for struct arpcom */
+
+#include "opt_ipfw.h"
+#include "opt_ipdn.h"
+
+#if defined(IPFIREWALL)
+#include <net/route.h>
+#include <netinet/ip_fw.h>
+#if defined(DUMMYNET)
+#include <netinet/ip_dummynet.h>
+#endif
+#endif
+
+#include <net/bridge.h>
+
+/*
+ * For debugging, you can use the following macros.
+ * Remember, rdtsc() only works on Pentium-class machines.
+
+ quad_t ticks;
+ DDB(ticks = rdtsc();)
+ ... interesting code ...
+ DDB(bdg_fw_ticks += (u_long)(rdtsc() - ticks) ; bdg_fw_count++ ;)
+
+ *
+ */
+
+#define DDB(x) x
+#define DEB(x)
+
+/*
+ * System initialization
+ */
+
+static void bdginit(void *);
+static void flush_table(void);
+
+SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_SECOND, bdginit, NULL)
+
+static int bdg_ipfw = 0 ;
+int do_bridge = 0;
+bdg_hash_table *bdg_table = NULL ;
+
+/*
+ * we need additional info for the bridge. The ifp2sc[] array
+ * provides a pointer to this struct using the if_index.
+ * bdg_softc has a backpointer to the struct ifnet, the bridge
+ * flags, and a group (bridging occurs only between ports of the
+ * same group).
+ */
+struct bdg_softc {
+ struct ifnet *ifp ;
+ /* ((struct arpcom *)ifp)->ac_enaddr is the eth. addr */
+ int flags ;
+ int group ;
+} ;
+
+static struct bdg_softc **ifp2sc = NULL ;
+
+#if 0 /* new code using ifp2sc */
+#define SAMEGROUP(ifp,src) (src == NULL || \
+ ifp2sc[ifp->if_index]->group == ifp2sc[src->if_index]->group )
+#define MUTED(ifp) (ifp2sc[ifp->if_index]->flags & IFF_MUTE)
+#define MUTE(ifp) ifp2sc[ifp->if_index]->flags |= IFF_MUTE
+#define UNMUTE(ifp) ifp2sc[ifp->if_index]->flags &= ~IFF_MUTE
+#else
+#define SAMEGROUP(a,b) 1
+#define MUTED(ifp) (ifp->if_flags & IFF_MUTE)
+#define MUTE(ifp) ifp->if_flags |= IFF_MUTE
+#define UNMUTE(ifp) ifp->if_flags &= ~IFF_MUTE
+#endif
+
+static int
+sysctl_bdg SYSCTL_HANDLER_ARGS
+{
+ int error, oldval = do_bridge ;
+
+ error = sysctl_handle_int(oidp,
+ oidp->oid_arg1, oidp->oid_arg2, req);
+ printf("called sysctl for bridge name %s arg2 %d val %d->%d\n",
+ oidp->oid_name, oidp->oid_arg2,
+ oldval, do_bridge);
+ if (bdg_table == NULL)
+ do_bridge = 0 ;
+ if (oldval != do_bridge) {
+ flush_table();
+ }
+ return error ;
+}
+
+SYSCTL_DECL(_net_link_ether);
+SYSCTL_PROC(_net_link_ether, OID_AUTO, bridge, CTLTYPE_INT|CTLFLAG_RW,
+ &do_bridge, 0, &sysctl_bdg, "I", "Bridging");
+
+SYSCTL_INT(_net_link_ether, OID_AUTO, bridge_ipfw, CTLFLAG_RW, &bdg_ipfw,0,"");
+#if 1 /* diagnostic vars */
+int bdg_in_count = 0 , bdg_in_ticks = 0 , bdg_fw_count = 0, bdg_fw_ticks = 0 ;
+SYSCTL_INT(_net_link_ether, OID_AUTO, bdginc, CTLFLAG_RW, &bdg_in_count,0,"");
+SYSCTL_INT(_net_link_ether, OID_AUTO, bdgint, CTLFLAG_RW, &bdg_in_ticks,0,"");
+SYSCTL_INT(_net_link_ether, OID_AUTO, bdgfwc, CTLFLAG_RW, &bdg_fw_count,0,"");
+SYSCTL_INT(_net_link_ether, OID_AUTO, bdgfwt, CTLFLAG_RW, &bdg_fw_ticks,0,"");
+#endif
+static struct bdg_stats bdg_stats ;
+SYSCTL_STRUCT(_net_link_ether, PF_BDG, bdgstats,
+ CTLFLAG_RD, &bdg_stats , bdg_stats, "bridge statistics");
+
+static int bdg_loops ;
+
+/*
+ * completely flush the bridge table.
+ */
+static void
+flush_table()
+{
+ int s,i;
+
+ if (bdg_table == NULL)
+ return ;
+ s = splimp();
+ for (i=0; i< HASH_SIZE; i++)
+ bdg_table[i].name= NULL; /* clear table */
+ splx(s);
+}
+
+/*
+ * called periodically to flush entries etc.
+ */
+static void
+bdg_timeout(void *dummy)
+{
+ struct ifnet *ifp ;
+ int s ;
+ static int slowtimer = 0 ;
+
+ if (do_bridge) {
+ static int age_index = 0 ; /* index of table position to age */
+ int l = age_index + HASH_SIZE/4 ;
+ /*
+ * age entries in the forwarding table.
+ */
+ if (l > HASH_SIZE)
+ l = HASH_SIZE ;
+ for (; age_index < l ; age_index++)
+ if (bdg_table[age_index].used)
+ bdg_table[age_index].used = 0 ;
+ else if (bdg_table[age_index].name) {
+ /* printf("xx flushing stale entry %d\n", age_index); */
+ bdg_table[age_index].name = NULL ;
+ }
+ if (age_index >= HASH_SIZE)
+ age_index = 0 ;
+
+ if (--slowtimer <= 0 ) {
+ slowtimer = 5 ;
+
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) {
+ if (ifp->if_type != IFT_ETHER)
+ continue ;
+ if ( 0 == ( ifp->if_flags & IFF_UP) ) {
+ s = splimp();
+ if_up(ifp);
+ splx(s);
+ }
+ if ( 0 == ( ifp->if_flags & IFF_PROMISC) ) {
+ int ret ;
+ s = splimp();
+ ret = ifpromisc(ifp, 1);
+ splx(s);
+ printf(">> now %s%d flags 0x%x promisc %d\n",
+ ifp->if_name, ifp->if_unit,
+ ifp->if_flags, ret);
+ }
+ if (MUTED(ifp)) {
+ printf(">> unmuting %s%d\n", ifp->if_name, ifp->if_unit);
+ UNMUTE(ifp) ;
+ }
+ }
+ bdg_loops = 0 ;
+ }
+ }
+ timeout(bdg_timeout, (void *)0, 2*hz );
+}
+
+/*
+ * local MAC addresses are held in a small array. This makes comparisons
+ * much faster.
+ */
+unsigned char bdg_addresses[6*BDG_MAX_PORTS];
+int bdg_ports ;
+
+/*
+ * initialization of bridge code.
+ *
+ * This will have to change to support kldload.
+ */
+static void
+bdginit(dummy)
+ void *dummy;
+{
+ int i, s;
+ struct ifnet *ifp;
+ struct arpcom *ac ;
+ u_char *eth_addr ;
+
+ /*
+ * initialization of bridge code
+ */
+ s = splimp(); /* XXX does this matter? */
+ if (bdg_table == NULL)
+ bdg_table = (struct hash_table *)
+ malloc(HASH_SIZE * sizeof(struct hash_table),
+ M_IFADDR, M_WAITOK);
+ flush_table();
+
+ ifp2sc = malloc(if_index * sizeof(struct bdg_softc *), M_IFADDR, M_WAITOK );
+ bzero(ifp2sc, if_index * sizeof(struct bdg_softc *) );
+
+ bzero(&bdg_stats, sizeof(bdg_stats) );
+ bdg_ports = 0 ;
+ eth_addr = bdg_addresses ;
+
+ printf("BRIDGE 981214, have %d interfaces\n", if_index);
+ for (i = 0 , ifp = ifnet.tqh_first ; i < if_index ;
+ i++, ifp = ifp->if_link.tqe_next)
+ if (ifp->if_type == IFT_ETHER) { /* ethernet ? */
+ ac = (struct arpcom *)ifp;
+ sprintf(bdg_stats.s[ifp->if_index].name,
+ "%s%d", ifp->if_name, ifp->if_unit);
+ printf("-- index %d %s type %d phy %d addrl %d addr %6D\n",
+ ifp->if_index,
+ bdg_stats.s[ifp->if_index].name,
+ (int)ifp->if_type, (int) ifp->if_physical,
+ (int)ifp->if_addrlen,
+ ac->ac_enaddr, "." );
+ bcopy(ac->ac_enaddr, eth_addr, 6);
+ eth_addr += 6 ;
+
+ ifp2sc[bdg_ports] = malloc(sizeof(struct bdg_softc),
+ M_IFADDR, M_WAITOK );
+ ifp2sc[bdg_ports]->ifp = ifp ;
+ ifp2sc[bdg_ports]->flags = 0 ;
+ ifp2sc[bdg_ports]->group = 0 ;
+ bdg_ports ++ ;
+ }
+ bdg_timeout(0);
+ do_bridge=0;
+ splx(s);
+}
+
+/*
+ * bridge_in() is invoked to perform bridging decision on input packets.
+ * On Input:
+ * m	packet to be bridged. The mbuf need not hold the
+ *	whole packet; only the first 14 bytes suffice. We
+ *	assume them to be contiguous. No alignment assumptions are
+ *	made, because misalignment is not a problem on i386-class machines.
+ *
+ * On Return: destination of packet, one of
+ * BDG_BCAST broadcast
+ * BDG_MCAST multicast
+ * BDG_LOCAL is only for a local address (do not forward)
+ * BDG_DROP drop the packet
+ * ifp ifp of the destination interface.
+ *
+ * Forwarding is not done directly to give a chance to some drivers
+ * to fetch more of the packet, or simply drop it completely.
+ */
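+/*
+ * A driver tap might look roughly like this (illustrative sketch only,
+ * derived from the return values above; bdg_forward() may consume the
+ * mbuf, leaving *m == NULL):
+ *
+ *	struct ifnet *dst = bridge_in(m);
+ *	if (dst == BDG_DROP) {
+ *		m_freem(m);
+ *		return;
+ *	}
+ *	if (dst != BDG_LOCAL)
+ *		bdg_forward(&m, dst);
+ *	if (m == NULL)
+ *		return;
+ */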
+
+
+struct ifnet *
+bridge_in(struct mbuf *m)
+{
+ int index;
+ struct ifnet *ifp = m->m_pkthdr.rcvif, *dst , *old ;
+ int dropit = MUTED(ifp) ;
+ struct ether_header *eh;
+
+ eh = mtod(m, struct ether_header *);
+
+ /*
+ * hash the source address
+ */
+ index= HASH_FN(eh->ether_shost);
+ bdg_table[index].used = 1 ;
+ old = bdg_table[index].name ;
+ if ( old ) { /* the entry is valid. */
+ if (!BDG_MATCH( eh->ether_shost, bdg_table[index].etheraddr) ) {
+ printf("collision at %d\n", index);
+ bdg_table[index].name = NULL ;
+ } else if (old != ifp) {
+ /*
+ * found a loop. Either a machine has moved, or there
+ * is a misconfiguration/reconfiguration of the network.
+ * First, do not forward this packet!
+ * Record the relocation anyways; then, if loops persist,
+ * suspect a reconfiguration and disable forwarding
+ * from the old interface.
+ */
+ bdg_table[index].name = ifp ; /* relocate address */
+ printf("-- loop (%d) %6D to %s%d from %s%d (%s)\n",
+ bdg_loops, eh->ether_shost, ".",
+ ifp->if_name, ifp->if_unit,
+ old->if_name, old->if_unit,
+ old->if_flags & IFF_MUTE ? "muted":"ignore");
+ dropit = 1 ;
+ if ( !MUTED(old) ) {
+ if (++bdg_loops > 10)
+ MUTE(old) ;
+ }
+ }
+ }
+
+ /*
+ * now write the source address into the table
+ */
+ if (bdg_table[index].name == NULL) {
+ DEB(printf("new addr %6D at %d for %s%d\n",
+ eh->ether_shost, ".", index, ifp->if_name, ifp->if_unit);)
+ bcopy(eh->ether_shost, bdg_table[index].etheraddr, 6);
+ bdg_table[index].name = ifp ;
+ }
+ dst = bridge_dst_lookup(m);
+ /* Return values:
+ * BDG_BCAST, BDG_MCAST, BDG_LOCAL, BDG_UNKNOWN, BDG_DROP, ifp.
+     * For muted interfaces, the first 3 are changed into BDG_LOCAL,
+     * and the others into BDG_DROP. Also, for incoming packets, ifp is
+     * changed to BDG_DROP in case ifp == src. These mods are not necessary
+ * for outgoing packets from ether_output().
+ */
+ BDG_STAT(ifp, BDG_IN);
+ switch ((int)dst) {
+ case (int)BDG_BCAST:
+ case (int)BDG_MCAST:
+ case (int)BDG_LOCAL:
+ case (int)BDG_UNKNOWN:
+ case (int)BDG_DROP:
+ BDG_STAT(ifp, dst);
+ break ;
+ default :
+ if (dst == ifp || dropit )
+ BDG_STAT(ifp, BDG_DROP);
+ else
+ BDG_STAT(ifp, BDG_FORWARD);
+ break ;
+ }
+
+ if ( dropit ) {
+ if (dst == BDG_BCAST || dst == BDG_MCAST || dst == BDG_LOCAL)
+ return BDG_LOCAL ;
+ else
+ return BDG_DROP ;
+ } else {
+ return (dst == ifp ? BDG_DROP : dst ) ;
+ }
+}
+
+/*
+ * Forward to dst, excluding src port and (if not a single interface)
+ * muted interfaces. The packet is freed if marked as such
+ * and not for a local destination.
+ * A cleaner implementation would be to make bdg_forward()
+ * always consume the packet, leaving to the caller the task
+ * of making a copy if it needs one. As it is now, bdg_forward()
+ * can keep a copy alive in some cases.
+ */
+int
+bdg_forward (struct mbuf **m0, struct ifnet *dst)
+{
+ struct ifnet *src = (*m0)->m_pkthdr.rcvif; /* could be NULL in output */
+ struct ifnet *ifp ;
+ int error=0, s ;
+ int once = 0; /* execute the loop only once */
+ int canfree = 1 ; /* can free the buf at the end */
+ struct mbuf *m ;
+#ifdef IPFIREWALL
+ struct ip *ip;
+ struct ether_header *eh = mtod(*m0, struct ether_header *); /* XXX */
+#endif
+
+ if (dst == BDG_DROP) { /* this should not happen */
+	printf("xx bdg_forward for BDG_DROP\n");
+ m_freem(*m0) ;
+ *m0 = NULL ;
+ return 0;
+ }
+    if (dst == BDG_LOCAL) { /* this should not happen either */
+ printf("xx ouch, bdg_forward for local pkt\n");
+ return 0;
+ }
+ if (dst == BDG_BCAST || dst == BDG_MCAST || dst == BDG_UNKNOWN) {
+ ifp = ifnet.tqh_first ;
+ once = 0 ;
+ if (dst != BDG_UNKNOWN)
+ canfree = 0 ;
+ } else {
+ ifp = dst ;
+ once = 1 ; /* and also canfree */
+ }
+#ifdef IPFIREWALL
+ /*
+ * do filtering in a very similar way to what is done
+ * in ip_output. Only for IP packets, and only pass/fail/dummynet
+ * is supported. The tricky thing is to make sure that enough of
+ * the packet (basically, Eth+IP+TCP/UDP headers) is contiguous
+ * so that calls to m_pullup in ip_fw_chk will not kill the
+ * ethernet header.
+ */
+ if (ip_fw_chk_ptr) {
+ u_int16_t dummy ;
+ struct ip_fw_chain *rule;
+ int off;
+
+ m = *m0 ;
+#ifdef DUMMYNET
+ if (m->m_type == MT_DUMMYNET) {
+ /*
+ * the packet was already tagged, so part of the
+ * processing was already done, and we need to go down.
+ */
+ rule = (struct ip_fw_chain *)(m->m_data) ;
+ (*m0) = m = m->m_next ;
+
+ src = m->m_pkthdr.rcvif; /* could be NULL in output */
+ eh = mtod(m, struct ether_header *); /* XXX */
+ canfree = 1 ; /* for sure, a copy is not needed later. */
+ goto forward; /* HACK! */
+ } else
+#endif
+ rule = NULL ;
+ if (bdg_ipfw == 0)
+ goto forward ;
+ if (src == NULL)
+ goto forward ; /* do not apply to packets from ether_output */
+ if (canfree == 0 ) /* need to make a copy */
+ m = m_copypacket(*m0, M_DONTWAIT);
+ if (m == NULL) {
+ /* fail... */
+ return 0 ;
+ }
+
+ dummy = 0 ;
+ /*
+ * before calling the firewall, swap fields the same as IP does.
+ * here we assume the pkt is an IP one and the header is contiguous
+ */
+ eh = mtod(m, struct ether_header *);
+ ip = (struct ip *)(eh + 1 ) ;
+ NTOHS(ip->ip_len);
+ NTOHS(ip->ip_id);
+ NTOHS(ip->ip_off);
+
+ /*
+ * The third parameter to the firewall code is the dst. interface.
+ * Since we apply checks only on input pkts we use NULL.
+ */
+ off = (*ip_fw_chk_ptr)(NULL, 0, NULL, &dummy, &m, &rule, NULL) ;
+ if (m == NULL) { /* pkt discarded by firewall */
+ if (canfree)
+ *m0 = NULL ;
+ return 0 ;
+ }
+ /*
+ * on return, the mbuf pointer might have changed. Restore
+ * *m0 (if it was the same as m), eh, ip and then
+ * restore original ordering.
+ */
+ eh = mtod(m, struct ether_header *);
+ ip = (struct ip *)(eh + 1 ) ;
+ if (canfree) /* m was a reference to *m0, so update *m0 */
+ *m0 = m ;
+ HTONS(ip->ip_len);
+ HTONS(ip->ip_id);
+ HTONS(ip->ip_off);
+ if (off == 0) {
+ if (canfree == 0)
+ m_freem(m);
+ goto forward ;
+ }
+#ifdef DUMMYNET
+ if (off & 0x10000) {
+ /*
+ * pass the pkt to dummynet. Need to include m, dst, rule.
+ * Dummynet consumes the packet in all cases.
+ */
+ dummynet_io((off & 0xffff), DN_TO_BDG_FWD, m, dst, NULL, 0, rule);
+ if (canfree) /* dummynet has consumed the original one */
+ *m0 = NULL ;
+ return 0 ;
+ }
+#endif
+ /* if none of the above matches, we have to drop the pkt */
+ if (m)
+ m_freem(m);
+ if (canfree && m != *m0) {
+ m_freem(*m0);
+ *m0 = NULL ;
+ }
+ return 0 ;
+ }
+forward:
+#endif /* IPFIREWALL */
+ if (canfree && once)
+ m = *m0 ;
+ else
+ m = NULL ;
+
+ for ( ; ifp ; ifp = ifp->if_link.tqe_next ) {
+ if (ifp != src && ifp->if_type == IFT_ETHER &&
+ (ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING) &&
+ SAMEGROUP(ifp, src) && !MUTED(ifp) ) {
+ if (m == NULL) { /* do i need to make a copy ? */
+ if (canfree && ifp->if_link.tqe_next == NULL) /* last one! */
+ m = *m0 ;
+ else /* on a P5-90, m_packetcopy takes 540 ticks */
+ m = m_copypacket(*m0, M_DONTWAIT);
+ if (m == NULL) {
+ printf("bdg_forward: sorry, m_copy failed!\n");
+ return ENOBUFS ;
+ }
+ }
+ /*
+ * execute last part of ether_output.
+ */
+ s = splimp();
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ MUTE(ifp); /* good measure... */
+ splx(s);
+ error = ENOBUFS ;
+ } else {
+ ifp->if_obytes += m->m_pkthdr.len ;
+ if (m->m_flags & M_MCAST)
+ ifp->if_omcasts++;
+ IF_ENQUEUE(&ifp->if_snd, m);
+ if ((ifp->if_flags & IFF_OACTIVE) == 0)
+ (*ifp->if_start)(ifp);
+ splx(s);
+ if (m == *m0)
+ *m0 = NULL ; /* the packet is gone... */
+ m = NULL ;
+ }
+ BDG_STAT(ifp, BDG_OUT);
+ }
+ if (once)
+ break ;
+ }
+
+ /* cleanup any mbuf leftover. */
+ if (m)
+ m_freem(m);
+ if (m == *m0)
+ *m0 = NULL ;
+ if (canfree && *m0) {
+ m_freem(*m0);
+ *m0 = NULL ;
+ }
+ return error ;
+}
diff --git a/sys/net/bridge.h b/sys/net/bridge.h
new file mode 100644
index 0000000..e151e4e
--- /dev/null
+++ b/sys/net/bridge.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 1998 Luigi Rizzo
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+extern int do_bridge;
+/*
+ * the hash table for the bridge
+ */
+typedef struct hash_table {
+ struct ifnet *name ;
+ unsigned char etheraddr[6] ;
+ unsigned short used ;
+} bdg_hash_table ;
+
+extern bdg_hash_table *bdg_table ;
+
+#define BDG_MAX_PORTS 128
+extern unsigned char bdg_addresses[6*BDG_MAX_PORTS];
+extern int bdg_ports ;
+
+/*
+ * Out of the 6 bytes of an ethernet address, the last ones are the most
+ * "variable", so the hash is computed on them. Since we are on a
+ * little-endian machine, we have to do some gimmickry...
+ */
+#define HASH_SIZE 8192 /* must be a power of 2 */
+#define HASH_FN(addr) ( \
+ ntohs( ((short *)addr)[1] ^ ((short *)addr)[2] ) & (HASH_SIZE -1))
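+/*
+ * i.e. bytes 2-3 of the address are XORed with bytes 4-5 and the result
+ * is folded modulo HASH_SIZE.
+ */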
+
+#define IFF_MUTE IFF_LINK2 /* will need a separate flag... */
+
+struct ifnet *bridge_in(struct mbuf *m);
+/* bdg_forward frees the mbuf if necessary, returning null */
+int bdg_forward (struct mbuf **m, struct ifnet *dst);
+
+#ifdef __i386__
+#define BDG_MATCH(a,b) ( \
+ ((unsigned short *)(a))[2] == ((unsigned short *)(b))[2] && \
+ *((unsigned int *)(a)) == *((unsigned int *)(b)) )
+#define IS_ETHER_BROADCAST(a) ( \
+ *((unsigned int *)(a)) == 0xffffffff && \
+ ((unsigned short *)(a))[2] == 0xffff )
+#else
+#warning... must complete these for the alpha etc.
+#define BDG_MATCH(a,b) (!bcmp(a, b, ETHER_ADDR_LEN) )
+#endif
+/*
+ * The following constants are not legal ifnet pointers, and are used
+ * as return values from the classifier, bridge_dst_lookup().
+ * The same values are used as indices in the statistics arrays,
+ * with BDG_FORWARD replacing specifically forwarded packets.
+ */
+#define BDG_BCAST ( (struct ifnet *)1 )
+#define BDG_MCAST ( (struct ifnet *)2 )
+#define BDG_LOCAL ( (struct ifnet *)3 )
+#define BDG_DROP ( (struct ifnet *)4 )
+#define BDG_UNKNOWN ( (struct ifnet *)5 )
+#define BDG_IN ( (struct ifnet *)7 )
+#define BDG_OUT ( (struct ifnet *)8 )
+#define BDG_FORWARD ( (struct ifnet *)9 )
+
+#define PF_BDG 3 /* XXX superhack */
+/*
+ * statistics, passed up with sysctl interface and ns -p bdg
+ */
+
+#define STAT_MAX (int)BDG_FORWARD
+struct bdg_port_stat {
+ char name[16];
+ u_long collisions;
+ u_long p_in[STAT_MAX+1];
+} ;
+
+struct bdg_stats {
+ struct bdg_port_stat s[16];
+} ;
+
+
+#define BDG_STAT(ifp, type) bdg_stats.s[ifp->if_index].p_in[(int)type]++
+
+#ifdef KERNEL
+/*
+ * Find the right pkt destination:
+ * BDG_BCAST is a broadcast
+ * BDG_MCAST is a multicast
+ * BDG_LOCAL is for a local address
+ * BDG_DROP must be dropped
+ *	other	ifp of the dest. interface (incl. self)
+ */
+static __inline
+struct ifnet *
+bridge_dst_lookup(struct mbuf *m)
+{
+ struct ether_header *eh = mtod(m, struct ether_header *);
+ struct ifnet *dst ;
+ int index ;
+ u_char *eth_addr = bdg_addresses ;
+
+ if (IS_ETHER_BROADCAST(eh->ether_dhost))
+ return BDG_BCAST ;
+ if (eh->ether_dhost[0] & 1)
+ return BDG_MCAST ;
+ /*
+ * Lookup local addresses in case one matches.
+ */
+ for (index = bdg_ports, eth_addr = bdg_addresses ;
+ index ; index--, eth_addr += 6 )
+ if (BDG_MATCH(eth_addr, eh->ether_dhost) )
+ return BDG_LOCAL ;
+ /*
+ * Look for a possible destination in table
+ */
+ index= HASH_FN( eh->ether_dhost );
+ dst = bdg_table[index].name;
+ if ( dst && BDG_MATCH( bdg_table[index].etheraddr, eh->ether_dhost) )
+ return dst ;
+ else
+ return BDG_UNKNOWN ;
+}
+
+#endif /* KERNEL */
diff --git a/sys/net/bsd_comp.c b/sys/net/bsd_comp.c
new file mode 100644
index 0000000..d5072cc
--- /dev/null
+++ b/sys/net/bsd_comp.c
@@ -0,0 +1,1113 @@
+/* Because this code is derived from the 4.3BSD compress source:
+ *
+ *
+ * Copyright (c) 1985, 1986 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * James A. Woods, derived from original work by Spencer Thomas
+ * and Joseph Orost.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This version is for use with mbufs on BSD-derived systems.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <net/ppp_defs.h>
+
+#define PACKETPTR struct mbuf *
+#include <net/ppp_comp.h>
+
+#if DO_BSD_COMPRESS
+/*
+ * PPP "BSD compress" compression
+ * The differences between this compression and the classic BSD LZW
+ * source are obvious from the requirement that the classic code worked
+ * with files while this handles arbitrarily long streams that
+ * are broken into packets. They are:
+ *
+ * When the code size expands, a block of junk is not emitted by
+ * the compressor and not expected by the decompressor.
+ *
+ * New codes are not necessarily assigned every time an old
+ * code is output by the compressor. This is because a packet
+ * end forces a code to be emitted, but does not imply that a
+ * new sequence has been seen.
+ *
+ * The compression ratio is checked at the first end of a packet
+ * after the appropriate gap. Besides simplifying and speeding
+ * things up, this makes it more likely that the transmitter
+ * and receiver will agree when the dictionary is cleared when
+ * compression is not going well.
+ */
+
+/*
+ * A dictionary for doing BSD compress.
+ */
+struct bsd_db {
+ int totlen; /* length of this structure */
+ u_int hsize; /* size of the hash table */
+ u_char hshift; /* used in hash function */
+ u_char n_bits; /* current bits/code */
+ u_char maxbits;
+ u_char debug;
+ u_char unit;
+ u_int16_t seqno; /* sequence # of next packet */
+ u_int hdrlen; /* header length to preallocate */
+ u_int mru;
+ u_int maxmaxcode; /* largest valid code */
+ u_int max_ent; /* largest code in use */
+ u_int in_count; /* uncompressed bytes, aged */
+ u_int bytes_out; /* compressed bytes, aged */
+ u_int ratio; /* recent compression ratio */
+ u_int checkpoint; /* when to next check the ratio */
+ u_int clear_count; /* times dictionary cleared */
+ u_int incomp_count; /* incompressible packets */
+ u_int incomp_bytes; /* incompressible bytes */
+ u_int uncomp_count; /* uncompressed packets */
+ u_int uncomp_bytes; /* uncompressed bytes */
+ u_int comp_count; /* compressed packets */
+ u_int comp_bytes; /* compressed bytes */
+ u_int16_t *lens; /* array of lengths of codes */
+ struct bsd_dict {
+ union { /* hash value */
+ u_int32_t fcode;
+ struct {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int16_t prefix; /* preceding code */
+ u_char suffix; /* last character of new code */
+ u_char pad;
+#else
+ u_char pad;
+ u_char suffix; /* last character of new code */
+ u_int16_t prefix; /* preceding code */
+#endif
+ } hs;
+ } f;
+ u_int16_t codem1; /* output of hash table -1 */
+ u_int16_t cptr; /* map code to hash table entry */
+ } dict[1];
+};
+
+#define BSD_OVHD 2 /* BSD compress overhead/packet */
+#define BSD_INIT_BITS BSD_MIN_BITS
+
+static void bsd_clear __P((struct bsd_db *db));
+static int bsd_check __P((struct bsd_db *db));
+static void *bsd_alloc __P((u_char *options, int opt_len, int decomp));
+static int bsd_init __P((struct bsd_db *db, u_char *options, int opt_len,
+ int unit, int hdrlen, int mru, int debug,
+ int decomp));
+static void *bsd_comp_alloc __P((u_char *options, int opt_len));
+static void *bsd_decomp_alloc __P((u_char *options, int opt_len));
+static void bsd_free __P((void *state));
+static int bsd_comp_init __P((void *state, u_char *options, int opt_len,
+ int unit, int hdrlen, int debug));
+static int bsd_decomp_init __P((void *state, u_char *options, int opt_len,
+ int unit, int hdrlen, int mru, int debug));
+static int bsd_compress __P((void *state, struct mbuf **mret,
+ struct mbuf *mp, int slen, int maxolen));
+static void bsd_incomp __P((void *state, struct mbuf *dmsg));
+static int bsd_decompress __P((void *state, struct mbuf *cmp,
+ struct mbuf **dmpp));
+static void bsd_reset __P((void *state));
+static void bsd_comp_stats __P((void *state, struct compstat *stats));
+
+/*
+ * Procedures exported to if_ppp.c.
+ */
+struct compressor ppp_bsd_compress = {
+ CI_BSD_COMPRESS, /* compress_proto */
+ bsd_comp_alloc, /* comp_alloc */
+ bsd_free, /* comp_free */
+ bsd_comp_init, /* comp_init */
+ bsd_reset, /* comp_reset */
+ bsd_compress, /* compress */
+ bsd_comp_stats, /* comp_stat */
+ bsd_decomp_alloc, /* decomp_alloc */
+ bsd_free, /* decomp_free */
+ bsd_decomp_init, /* decomp_init */
+ bsd_reset, /* decomp_reset */
+ bsd_decompress, /* decompress */
+ bsd_incomp, /* incomp */
+ bsd_comp_stats, /* decomp_stat */
+};
+
+/*
+ * the next two codes should not be changed lightly, as they must not
+ * lie within the contiguous general code space.
+ */
+#define CLEAR 256 /* table clear output code */
+#define FIRST 257 /* first free entry */
+#define LAST 255
+
+#define MAXCODE(b) ((1 << (b)) - 1)
+#define BADCODEM1 MAXCODE(BSD_MAX_BITS)
+
+#define BSD_HASH(prefix,suffix,hshift) ((((u_int32_t)(suffix)) << (hshift)) \
+ ^ (u_int32_t)(prefix))
+#define BSD_KEY(prefix,suffix) ((((u_int32_t)(suffix)) << 16) \
+ + (u_int32_t)(prefix))
+
+#define CHECK_GAP 10000 /* Ratio check interval */
+
+#define RATIO_SCALE_LOG 8
+#define RATIO_SCALE (1<<RATIO_SCALE_LOG)
+#define RATIO_MAX (0x7fffffff>>RATIO_SCALE_LOG)
+
+/*
+ * clear the dictionary
+ */
+static void
+bsd_clear(db)
+ struct bsd_db *db;
+{
+ db->clear_count++;
+ db->max_ent = FIRST-1;
+ db->n_bits = BSD_INIT_BITS;
+ db->ratio = 0;
+ db->bytes_out = 0;
+ db->in_count = 0;
+ db->checkpoint = CHECK_GAP;
+}
+
+/*
+ * If the dictionary is full, then see if it is time to reset it.
+ *
+ * Compute the compression ratio using fixed-point arithmetic
+ * with 8 fractional bits.
+ *
+ * Since we have an infinite stream instead of a single file,
+ * watch only the local compression ratio.
+ *
+ * Since both peers must reset the dictionary at the same time even in
+ * the absence of CLEAR codes (while packets are incompressible), they
+ * must compute the same ratio.
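+ *
+ * For example (illustrative numbers): with in_count = 10000 and
+ * bytes_out = 4000, new_ratio = (10000 << 8) / 4000 = 640, i.e. a
+ * 2.5:1 ratio in the fixed-point scale used here (RATIO_SCALE = 256).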
+ */
+static int /* 1=output CLEAR */
+bsd_check(db)
+ struct bsd_db *db;
+{
+ u_int new_ratio;
+
+ if (db->in_count >= db->checkpoint) {
+ /* age the ratio by limiting the size of the counts */
+ if (db->in_count >= RATIO_MAX
+ || db->bytes_out >= RATIO_MAX) {
+ db->in_count -= db->in_count/4;
+ db->bytes_out -= db->bytes_out/4;
+ }
+
+ db->checkpoint = db->in_count + CHECK_GAP;
+
+ if (db->max_ent >= db->maxmaxcode) {
+ /* Reset the dictionary only if the ratio is worse,
+ * or if it looks as if it has been poisoned
+ * by incompressible data.
+ *
+ * This does not overflow, because
+ * db->in_count <= RATIO_MAX.
+ */
+ new_ratio = db->in_count << RATIO_SCALE_LOG;
+ if (db->bytes_out != 0)
+ new_ratio /= db->bytes_out;
+
+ if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE) {
+ bsd_clear(db);
+ return 1;
+ }
+ db->ratio = new_ratio;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return statistics.
+ */
+static void
+bsd_comp_stats(state, stats)
+ void *state;
+ struct compstat *stats;
+{
+ struct bsd_db *db = (struct bsd_db *) state;
+ u_int out;
+
+ stats->unc_bytes = db->uncomp_bytes;
+ stats->unc_packets = db->uncomp_count;
+ stats->comp_bytes = db->comp_bytes;
+ stats->comp_packets = db->comp_count;
+ stats->inc_bytes = db->incomp_bytes;
+ stats->inc_packets = db->incomp_count;
+ stats->ratio = db->in_count;
+ out = db->bytes_out;
+ if (stats->ratio <= 0x7fffff)
+ stats->ratio <<= 8;
+ else
+ out >>= 8;
+ if (out != 0)
+ stats->ratio /= out;
+}
+
+/*
+ * Reset state, as on a CCP ResetReq.
+ */
+static void
+bsd_reset(state)
+ void *state;
+{
+ struct bsd_db *db = (struct bsd_db *) state;
+
+ db->seqno = 0;
+ bsd_clear(db);
+ db->clear_count = 0;
+}
+
+/*
+ * Allocate space for a (de) compressor.
+ */
+static void *
+bsd_alloc(options, opt_len, decomp)
+ u_char *options;
+ int opt_len, decomp;
+{
+ int bits;
+ u_int newlen, hsize, hshift, maxmaxcode;
+ struct bsd_db *db;
+
+ if (opt_len < CILEN_BSD_COMPRESS || options[0] != CI_BSD_COMPRESS
+ || options[1] != CILEN_BSD_COMPRESS
+ || BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
+ return NULL;
+ bits = BSD_NBITS(options[2]);
+ switch (bits) {
+ case 9: /* needs 82152 for both directions */
+ case 10: /* needs 84144 */
+ case 11: /* needs 88240 */
+ case 12: /* needs 96432 */
+ hsize = 5003;
+ hshift = 4;
+ break;
+ case 13: /* needs 176784 */
+ hsize = 9001;
+ hshift = 5;
+ break;
+ case 14: /* needs 353744 */
+ hsize = 18013;
+ hshift = 6;
+ break;
+ case 15: /* needs 691440 */
+ hsize = 35023;
+ hshift = 7;
+ break;
+ case 16: /* needs 1366160--far too much, */
+ /* hsize = 69001; */ /* and 69001 is too big for cptr */
+ /* hshift = 8; */ /* in struct bsd_db */
+ /* break; */
+ default:
+ return NULL;
+ }
+
+ maxmaxcode = MAXCODE(bits);
+ newlen = sizeof(*db) + (hsize-1) * (sizeof(db->dict[0]));
+ MALLOC(db, struct bsd_db *, newlen, M_DEVBUF, M_NOWAIT);
+ if (!db)
+ return NULL;
+ bzero(db, sizeof(*db) - sizeof(db->dict));
+
+ if (!decomp) {
+ db->lens = NULL;
+ } else {
+ MALLOC(db->lens, u_int16_t *, (maxmaxcode+1) * sizeof(db->lens[0]),
+ M_DEVBUF, M_NOWAIT);
+ if (!db->lens) {
+ FREE(db, M_DEVBUF);
+ return NULL;
+ }
+ }
+
+ db->totlen = newlen;
+ db->hsize = hsize;
+ db->hshift = hshift;
+ db->maxmaxcode = maxmaxcode;
+ db->maxbits = bits;
+
+ return (void *) db;
+}
+
+static void
+bsd_free(state)
+ void *state;
+{
+ struct bsd_db *db = (struct bsd_db *) state;
+
+ if (db->lens)
+ FREE(db->lens, M_DEVBUF);
+ FREE(db, M_DEVBUF);
+}
+
+static void *
+bsd_comp_alloc(options, opt_len)
+ u_char *options;
+ int opt_len;
+{
+ return bsd_alloc(options, opt_len, 0);
+}
+
+static void *
+bsd_decomp_alloc(options, opt_len)
+ u_char *options;
+ int opt_len;
+{
+ return bsd_alloc(options, opt_len, 1);
+}
+
+/*
+ * Initialize the database.
+ */
+static int
+bsd_init(db, options, opt_len, unit, hdrlen, mru, debug, decomp)
+ struct bsd_db *db;
+ u_char *options;
+ int opt_len, unit, hdrlen, mru, debug, decomp;
+{
+ int i;
+
+ if (opt_len < CILEN_BSD_COMPRESS || options[0] != CI_BSD_COMPRESS
+ || options[1] != CILEN_BSD_COMPRESS
+ || BSD_VERSION(options[2]) != BSD_CURRENT_VERSION
+ || BSD_NBITS(options[2]) != db->maxbits
+ || (decomp && db->lens == NULL))
+ return 0;
+
+ if (decomp) {
+ i = LAST+1;
+ while (i != 0)
+ db->lens[--i] = 1;
+ }
+ i = db->hsize;
+ while (i != 0) {
+ db->dict[--i].codem1 = BADCODEM1;
+ db->dict[i].cptr = 0;
+ }
+
+ db->unit = unit;
+ db->hdrlen = hdrlen;
+ db->mru = mru;
+#ifndef DEBUG
+ if (debug)
+#endif
+ db->debug = 1;
+
+ bsd_reset(db);
+
+ return 1;
+}
+
+static int
+bsd_comp_init(state, options, opt_len, unit, hdrlen, debug)
+ void *state;
+ u_char *options;
+ int opt_len, unit, hdrlen, debug;
+{
+ return bsd_init((struct bsd_db *) state, options, opt_len,
+ unit, hdrlen, 0, debug, 0);
+}
+
+static int
+bsd_decomp_init(state, options, opt_len, unit, hdrlen, mru, debug)
+ void *state;
+ u_char *options;
+ int opt_len, unit, hdrlen, mru, debug;
+{
+ return bsd_init((struct bsd_db *) state, options, opt_len,
+ unit, hdrlen, mru, debug, 1);
+}
+
+
+/*
+ * compress a packet
+ * One change from the BSD compress command is that when the
+ * code size expands, we do not output a bunch of padding.
+ */
+int /* new slen */
+bsd_compress(state, mret, mp, slen, maxolen)
+ void *state;
+ struct mbuf **mret; /* return compressed mbuf chain here */
+ struct mbuf *mp; /* from here */
+ int slen; /* uncompressed length */
+ int maxolen; /* max compressed length */
+{
+ struct bsd_db *db = (struct bsd_db *) state;
+ int hshift = db->hshift;
+ u_int max_ent = db->max_ent;
+ u_int n_bits = db->n_bits;
+ u_int bitno = 32;
+ u_int32_t accm = 0, fcode;
+ struct bsd_dict *dictp;
+ u_char c;
+ int hval, disp, ent, ilen;
+ u_char *rptr, *wptr;
+ u_char *cp_end;
+ int olen;
+ struct mbuf *m;
+
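+/*
+ * PUTBYTE() appends one byte to the output mbuf chain, extending the
+ * chain when the current mbuf fills up; olen counts output bytes even
+ * if an allocation fails (wptr is then left NULL).
+ */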
+#define PUTBYTE(v) { \
+ ++olen; \
+ if (wptr) { \
+ *wptr++ = (v); \
+ if (wptr >= cp_end) { \
+ m->m_len = wptr - mtod(m, u_char *); \
+ MGET(m->m_next, M_DONTWAIT, MT_DATA); \
+ m = m->m_next; \
+ if (m) { \
+ m->m_len = 0; \
+ if (maxolen - olen > MLEN) \
+ MCLGET(m, M_DONTWAIT); \
+ wptr = mtod(m, u_char *); \
+ cp_end = wptr + M_TRAILINGSPACE(m); \
+ } else \
+ wptr = NULL; \
+ } \
+ } \
+}
+
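+/*
+ * OUTPUT() packs one n_bits-wide code into the high end of the 32-bit
+ * accumulator and flushes every completed byte through PUTBYTE().
+ */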
+#define OUTPUT(ent) { \
+ bitno -= n_bits; \
+ accm |= ((ent) << bitno); \
+ do { \
+ PUTBYTE(accm >> 24); \
+ accm <<= 8; \
+ bitno += 8; \
+ } while (bitno <= 24); \
+}
+
+ /*
+ * If the protocol is not in the range we're interested in,
+ * just return without compressing the packet. If it is,
+ * the protocol becomes the first byte to compress.
+ */
+ rptr = mtod(mp, u_char *);
+ ent = PPP_PROTOCOL(rptr);
+ if (ent < 0x21 || ent > 0xf9) {
+ *mret = NULL;
+ return slen;
+ }
+
+ /* Don't generate compressed packets which are larger than
+ the uncompressed packet. */
+ if (maxolen > slen)
+ maxolen = slen;
+
+ /* Allocate one mbuf to start with. */
+ MGET(m, M_DONTWAIT, MT_DATA);
+ *mret = m;
+ if (m != NULL) {
+ m->m_len = 0;
+ if (maxolen + db->hdrlen > MLEN)
+ MCLGET(m, M_DONTWAIT);
+ m->m_data += db->hdrlen;
+ wptr = mtod(m, u_char *);
+ cp_end = wptr + M_TRAILINGSPACE(m);
+ } else
+ wptr = cp_end = NULL;
+
+ /*
+ * Copy the PPP header over, changing the protocol,
+ * and install the 2-byte packet sequence number.
+ */
+ if (wptr) {
+ *wptr++ = PPP_ADDRESS(rptr); /* assumes the ppp header is */
+ *wptr++ = PPP_CONTROL(rptr); /* all in one mbuf */
+ *wptr++ = 0; /* change the protocol */
+ *wptr++ = PPP_COMP;
+ *wptr++ = db->seqno >> 8;
+ *wptr++ = db->seqno;
+ }
+ ++db->seqno;
+
+ olen = 0;
+ rptr += PPP_HDRLEN;
+ slen = mp->m_len - PPP_HDRLEN;
+ ilen = slen + 1;
+ for (;;) {
+ if (slen <= 0) {
+ mp = mp->m_next;
+ if (!mp)
+ break;
+ rptr = mtod(mp, u_char *);
+ slen = mp->m_len;
+ if (!slen)
+ continue; /* handle 0-length buffers */
+ ilen += slen;
+ }
+
+ slen--;
+ c = *rptr++;
+ fcode = BSD_KEY(ent, c);
+ hval = BSD_HASH(ent, c, hshift);
+ dictp = &db->dict[hval];
+
+ /* Validate and then check the entry. */
+ if (dictp->codem1 >= max_ent)
+ goto nomatch;
+ if (dictp->f.fcode == fcode) {
+ ent = dictp->codem1+1;
+ continue; /* found (prefix,suffix) */
+ }
+
+ /* continue probing until a match or invalid entry */
+ disp = (hval == 0) ? 1 : hval;
+ do {
+ hval += disp;
+ if (hval >= db->hsize)
+ hval -= db->hsize;
+ dictp = &db->dict[hval];
+ if (dictp->codem1 >= max_ent)
+ goto nomatch;
+ } while (dictp->f.fcode != fcode);
+ ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */
+ continue;
+
+ nomatch:
+ OUTPUT(ent); /* output the prefix */
+
+ /* code -> hashtable */
+ if (max_ent < db->maxmaxcode) {
+ struct bsd_dict *dictp2;
+ /* expand code size if needed */
+ if (max_ent >= MAXCODE(n_bits))
+ db->n_bits = ++n_bits;
+
+ /* Invalidate old hash table entry using
+ * this code, and then take it over.
+ */
+ dictp2 = &db->dict[max_ent+1];
+ if (db->dict[dictp2->cptr].codem1 == max_ent)
+ db->dict[dictp2->cptr].codem1 = BADCODEM1;
+ dictp2->cptr = hval;
+ dictp->codem1 = max_ent;
+ dictp->f.fcode = fcode;
+
+ db->max_ent = ++max_ent;
+ }
+ ent = c;
+ }
+
+ OUTPUT(ent); /* output the last code */
+ db->bytes_out += olen;
+ db->in_count += ilen;
+ if (bitno < 32)
+ ++db->bytes_out; /* count complete bytes */
+
+ if (bsd_check(db))
+ OUTPUT(CLEAR); /* do not count the CLEAR */
+
+ /*
+ * Pad dribble bits of last code with ones.
+ * Do not emit a completely useless byte of ones.
+ */
+ if (bitno != 32)
+ PUTBYTE((accm | (0xff << (bitno-8))) >> 24);
+
+ if (m != NULL) {
+ m->m_len = wptr - mtod(m, u_char *);
+ m->m_next = NULL;
+ }
+
+ /*
+ * Increase code size if we would have without the packet
+ * boundary and as the decompressor will.
+ */
+ if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
+ db->n_bits++;
+
+ db->uncomp_bytes += ilen;
+ ++db->uncomp_count;
+ if (olen + PPP_HDRLEN + BSD_OVHD > maxolen) {
+ /* throw away the compressed stuff if it is longer than uncompressed */
+ if (*mret != NULL) {
+ m_freem(*mret);
+ *mret = NULL;
+ }
+ ++db->incomp_count;
+ db->incomp_bytes += ilen;
+ } else {
+ ++db->comp_count;
+ db->comp_bytes += olen + BSD_OVHD;
+ }
+
+ return olen + PPP_HDRLEN + BSD_OVHD;
+#undef OUTPUT
+#undef PUTBYTE
+}
+
+
+/*
+ * Update the "BSD Compress" dictionary on the receiver for
+ * incompressible data by pretending to compress the incoming data.
+ */
+static void
+bsd_incomp(state, dmsg)
+ void *state;
+ struct mbuf *dmsg;
+{
+ struct bsd_db *db = (struct bsd_db *) state;
+ u_int hshift = db->hshift;
+ u_int max_ent = db->max_ent;
+ u_int n_bits = db->n_bits;
+ struct bsd_dict *dictp;
+ u_int32_t fcode;
+ u_char c;
+ u_int32_t hval, disp;
+ int slen, ilen;
+ u_int bitno = 7;
+ u_char *rptr;
+ u_int ent;
+
+ /*
+ * If the protocol is not in the range we're interested in,
+ * just return without looking at the packet. If it is,
+ * the protocol becomes the first byte to "compress".
+ */
+ rptr = mtod(dmsg, u_char *);
+ ent = PPP_PROTOCOL(rptr);
+ if (ent < 0x21 || ent > 0xf9)
+ return;
+
+ db->seqno++;
+ ilen = 1; /* count the protocol as 1 byte */
+ rptr += PPP_HDRLEN;
+ slen = dmsg->m_len - PPP_HDRLEN;
+ for (;;) {
+ if (slen <= 0) {
+ dmsg = dmsg->m_next;
+ if (!dmsg)
+ break;
+ rptr = mtod(dmsg, u_char *);
+ slen = dmsg->m_len;
+ continue;
+ }
+ ilen += slen;
+
+ do {
+ c = *rptr++;
+ fcode = BSD_KEY(ent, c);
+ hval = BSD_HASH(ent, c, hshift);
+ dictp = &db->dict[hval];
+
+ /* validate and then check the entry */
+ if (dictp->codem1 >= max_ent)
+ goto nomatch;
+ if (dictp->f.fcode == fcode) {
+ ent = dictp->codem1+1;
+ continue; /* found (prefix,suffix) */
+ }
+
+ /* continue probing until a match or invalid entry */
+ disp = (hval == 0) ? 1 : hval;
+ do {
+ hval += disp;
+ if (hval >= db->hsize)
+ hval -= db->hsize;
+ dictp = &db->dict[hval];
+ if (dictp->codem1 >= max_ent)
+ goto nomatch;
+ } while (dictp->f.fcode != fcode);
+ ent = dictp->codem1+1;
+ continue; /* finally found (prefix,suffix) */
+
+ nomatch: /* output (count) the prefix */
+ bitno += n_bits;
+
+ /* code -> hashtable */
+ if (max_ent < db->maxmaxcode) {
+ struct bsd_dict *dictp2;
+ /* expand code size if needed */
+ if (max_ent >= MAXCODE(n_bits))
+ db->n_bits = ++n_bits;
+
+ /* Invalidate previous hash table entry
+ * assigned this code, and then take it over.
+ */
+ dictp2 = &db->dict[max_ent+1];
+ if (db->dict[dictp2->cptr].codem1 == max_ent)
+ db->dict[dictp2->cptr].codem1 = BADCODEM1;
+ dictp2->cptr = hval;
+ dictp->codem1 = max_ent;
+ dictp->f.fcode = fcode;
+
+ db->max_ent = ++max_ent;
+ db->lens[max_ent] = db->lens[ent]+1;
+ }
+ ent = c;
+ } while (--slen != 0);
+ }
+ bitno += n_bits; /* output (count) the last code */
+ db->bytes_out += bitno/8;
+ db->in_count += ilen;
+ (void)bsd_check(db);
+
+ ++db->incomp_count;
+ db->incomp_bytes += ilen;
+ ++db->uncomp_count;
+ db->uncomp_bytes += ilen;
+
+ /* Increase code size if we would have without the packet
+ * boundary and as the decompressor will.
+ */
+ if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
+ db->n_bits++;
+}
+
+
+/*
+ * Decompress "BSD Compress".
+ *
+ * Because of patent problems, we return DECOMP_ERROR for errors
+ * found by inspecting the input data and for system problems, but
+ * DECOMP_FATALERROR for any errors which could possibly be said to
+ * be being detected "after" decompression. For DECOMP_ERROR,
+ * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
+ * infringing a patent of Motorola's if we do, so we take CCP down
+ * instead.
+ *
+ * Given that the frame has the correct sequence number and a good FCS,
+ * errors such as invalid codes in the input most likely indicate a
+ * bug, so we return DECOMP_FATALERROR for them in order to turn off
+ * compression, even though they are detected by inspecting the input.
+ */
+int
+bsd_decompress(state, cmp, dmpp)
+ void *state;
+ struct mbuf *cmp, **dmpp;
+{
+ struct bsd_db *db = (struct bsd_db *) state;
+ u_int max_ent = db->max_ent;
+ u_int32_t accm = 0;
+ u_int bitno = 32; /* 1st valid bit in accm */
+ u_int n_bits = db->n_bits;
+ u_int tgtbitno = 32-n_bits; /* bitno when we have a code */
+ struct bsd_dict *dictp;
+ int explen, i, seq, len;
+ u_int incode, oldcode, finchar;
+ u_char *p, *rptr, *wptr;
+ struct mbuf *m, *dmp, *mret;
+ int adrs, ctrl, ilen;
+ int space, codelen, extra;
+
+ /*
+ * Save the address/control from the PPP header
+ * and then get the sequence number.
+ */
+ *dmpp = NULL;
+ rptr = mtod(cmp, u_char *);
+ adrs = PPP_ADDRESS(rptr);
+ ctrl = PPP_CONTROL(rptr);
+ rptr += PPP_HDRLEN;
+ len = cmp->m_len - PPP_HDRLEN;
+ seq = 0;
+ for (i = 0; i < 2; ++i) {
+ while (len <= 0) {
+ cmp = cmp->m_next;
+ if (cmp == NULL)
+ return DECOMP_ERROR;
+ rptr = mtod(cmp, u_char *);
+ len = cmp->m_len;
+ }
+ seq = (seq << 8) + *rptr++;
+ --len;
+ }
+
+ /*
+ * Check the sequence number and give up if it differs from
+ * the value we're expecting.
+ */
+ if (seq != db->seqno) {
+ if (db->debug)
+ printf("bsd_decomp%d: bad sequence # %d, expected %d\n",
+ db->unit, seq, db->seqno - 1);
+ return DECOMP_ERROR;
+ }
+ ++db->seqno;
+
+ /*
+ * Allocate one mbuf to start with.
+ */
+ MGETHDR(dmp, M_DONTWAIT, MT_DATA);
+ if (dmp == NULL)
+ return DECOMP_ERROR;
+ mret = dmp;
+ dmp->m_len = 0;
+ dmp->m_next = NULL;
+ MCLGET(dmp, M_DONTWAIT);
+ dmp->m_data += db->hdrlen;
+ wptr = mtod(dmp, u_char *);
+ space = M_TRAILINGSPACE(dmp) - PPP_HDRLEN + 1;
+
+ /*
+ * Fill in the ppp header, but not the last byte of the protocol
+ * (that comes from the decompressed data).
+ */
+ wptr[0] = adrs;
+ wptr[1] = ctrl;
+ wptr[2] = 0;
+ wptr += PPP_HDRLEN - 1;
+
+ ilen = len;
+ oldcode = CLEAR;
+ explen = 0;
+ for (;;) {
+ if (len == 0) {
+ cmp = cmp->m_next;
+ if (!cmp) /* quit at end of message */
+ break;
+ rptr = mtod(cmp, u_char *);
+ len = cmp->m_len;
+ ilen += len;
+ continue; /* handle 0-length buffers */
+ }
+
+ /*
+ * Accumulate bytes until we have a complete code.
+ * Then get the next code, relying on the 32-bit,
+ * unsigned accm to mask the result.
+ */
+ bitno -= 8;
+ accm |= *rptr++ << bitno;
+ --len;
+ if (tgtbitno < bitno)
+ continue;
+ incode = accm >> tgtbitno;
+ accm <<= n_bits;
+ bitno += n_bits;
+
+ if (incode == CLEAR) {
+ /*
+ * The dictionary must only be cleared at
+ * the end of a packet. But there could be an
+ * empty mbuf at the end.
+ */
+ if (len > 0 || cmp->m_next != NULL) {
+ while ((cmp = cmp->m_next) != NULL)
+ len += cmp->m_len;
+ if (len > 0) {
+ m_freem(mret);
+ if (db->debug)
+ printf("bsd_decomp%d: bad CLEAR\n", db->unit);
+ return DECOMP_FATALERROR; /* probably a bug */
+ }
+ }
+ bsd_clear(db);
+ explen = ilen = 0;
+ break;
+ }
+
+ if (incode > max_ent + 2 || incode > db->maxmaxcode
+ || (incode > max_ent && oldcode == CLEAR)) {
+ m_freem(mret);
+ if (db->debug) {
+ printf("bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
+ db->unit, incode, oldcode);
+ printf("max_ent=0x%x explen=%d seqno=%d\n",
+ max_ent, explen, db->seqno);
+ }
+ return DECOMP_FATALERROR; /* probably a bug */
+ }
+
+ /* Special case for KwKwK string. */
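+	/* (incode has not been assigned yet, so the string must be the
+	 * previous string plus a copy of its own first character.) */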
+ if (incode > max_ent) {
+ finchar = oldcode;
+ extra = 1;
+ } else {
+ finchar = incode;
+ extra = 0;
+ }
+
+ codelen = db->lens[finchar];
+ explen += codelen + extra;
+ if (explen > db->mru + 1) {
+ m_freem(mret);
+ if (db->debug) {
+ printf("bsd_decomp%d: ran out of mru\n", db->unit);
+#ifdef DEBUG
+ while ((cmp = cmp->m_next) != NULL)
+ len += cmp->m_len;
+ printf(" len=%d, finchar=0x%x, codelen=%d, explen=%d\n",
+ len, finchar, codelen, explen);
+#endif
+ }
+ return DECOMP_FATALERROR;
+ }
+
+ /*
+ * For simplicity, the decoded characters go in a single mbuf,
+ * so we allocate a single extra cluster mbuf if necessary.
+ */
+ if ((space -= codelen + extra) < 0) {
+ dmp->m_len = wptr - mtod(dmp, u_char *);
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ m_freem(mret);
+ return DECOMP_ERROR;
+ }
+ m->m_len = 0;
+ m->m_next = NULL;
+ dmp->m_next = m;
+ MCLGET(m, M_DONTWAIT);
+ space = M_TRAILINGSPACE(m) - (codelen + extra);
+ if (space < 0) {
+ /* now that's what I call *compression*. */
+ m_freem(mret);
+ return DECOMP_ERROR;
+ }
+ dmp = m;
+ wptr = mtod(dmp, u_char *);
+ }
+
+ /*
+ * Decode this code and install it in the decompressed buffer.
+ */
+ p = (wptr += codelen);
+ while (finchar > LAST) {
+ dictp = &db->dict[db->dict[finchar].cptr];
+#ifdef DEBUG
+ if (--codelen <= 0 || dictp->codem1 != finchar-1)
+ goto bad;
+#endif
+ *--p = dictp->f.hs.suffix;
+ finchar = dictp->f.hs.prefix;
+ }
+ *--p = finchar;
+
+#ifdef DEBUG
+ if (--codelen != 0)
+ printf("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n",
+ db->unit, codelen, incode, max_ent);
+#endif
+
+ if (extra) /* the KwKwK case again */
+ *wptr++ = finchar;
+
+ /*
+ * If not first code in a packet, and
+ * if not out of code space, then allocate a new code.
+ *
+ * Keep the hash table correct so it can be used
+ * with uncompressed packets.
+ */
+ if (oldcode != CLEAR && max_ent < db->maxmaxcode) {
+ struct bsd_dict *dictp2;
+ u_int32_t fcode;
+ u_int32_t hval, disp;
+
+ fcode = BSD_KEY(oldcode,finchar);
+ hval = BSD_HASH(oldcode,finchar,db->hshift);
+ dictp = &db->dict[hval];
+
+ /* look for a free hash table entry */
+ if (dictp->codem1 < max_ent) {
+ disp = (hval == 0) ? 1 : hval;
+ do {
+ hval += disp;
+ if (hval >= db->hsize)
+ hval -= db->hsize;
+ dictp = &db->dict[hval];
+ } while (dictp->codem1 < max_ent);
+ }
+
+ /*
+ * Invalidate previous hash table entry
+ * assigned this code, and then take it over
+ */
+ dictp2 = &db->dict[max_ent+1];
+ if (db->dict[dictp2->cptr].codem1 == max_ent) {
+ db->dict[dictp2->cptr].codem1 = BADCODEM1;
+ }
+ dictp2->cptr = hval;
+ dictp->codem1 = max_ent;
+ dictp->f.fcode = fcode;
+
+ db->max_ent = ++max_ent;
+ db->lens[max_ent] = db->lens[oldcode]+1;
+
+ /* Expand code size if needed. */
+ if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode) {
+ db->n_bits = ++n_bits;
+ tgtbitno = 32-n_bits;
+ }
+ }
+ oldcode = incode;
+ }
+ dmp->m_len = wptr - mtod(dmp, u_char *);
+
+ /*
+ * Keep the checkpoint right so that incompressible packets
+ * clear the dictionary at the right times.
+ */
+ db->bytes_out += ilen;
+ db->in_count += explen;
+ if (bsd_check(db) && db->debug) {
+ printf("bsd_decomp%d: peer should have cleared dictionary\n",
+ db->unit);
+ }
+
+ ++db->comp_count;
+ db->comp_bytes += ilen + BSD_OVHD;
+ ++db->uncomp_count;
+ db->uncomp_bytes += explen;
+
+ *dmpp = mret;
+ return DECOMP_OK;
+
+#ifdef DEBUG
+ bad:
+ if (codelen <= 0) {
+ printf("bsd_decomp%d: fell off end of chain ", db->unit);
+ printf("0x%x at 0x%x by 0x%x, max_ent=0x%x\n",
+ incode, finchar, db->dict[finchar].cptr, max_ent);
+ } else if (dictp->codem1 != finchar-1) {
+ printf("bsd_decomp%d: bad code chain 0x%x finchar=0x%x ",
+ db->unit, incode, finchar);
+ printf("oldcode=0x%x cptr=0x%x codem1=0x%x\n", oldcode,
+ db->dict[finchar].cptr, dictp->codem1);
+ }
+ m_freem(mret);
+ return DECOMP_FATALERROR;
+#endif /* DEBUG */
+}
+#endif /* DO_BSD_COMPRESS */
diff --git a/sys/net/ethernet.h b/sys/net/ethernet.h
new file mode 100644
index 0000000..8e982d7
--- /dev/null
+++ b/sys/net/ethernet.h
@@ -0,0 +1,98 @@
+/*
+ * Fundamental constants relating to ethernet.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _NET_ETHERNET_H_
+#define _NET_ETHERNET_H_
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define ETHER_ADDR_LEN 6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define ETHER_TYPE_LEN 2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define ETHER_CRC_LEN 4
+
+/*
+ * The length of the combined header.
+ */
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN*2+ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define ETHER_MIN_LEN 64
+
+/*
+ * The maximum packet length.
+ */
+#define ETHER_MAX_LEN 1518
+
+/*
+ * A macro to validate a frame length.
+ */
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+struct ether_header {
+ u_char ether_dhost[ETHER_ADDR_LEN];
+ u_char ether_shost[ETHER_ADDR_LEN];
+ u_short ether_type;
+};
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+struct ether_addr {
+ u_char octet[ETHER_ADDR_LEN];
+};
+
+#define ETHERTYPE_PUP 0x0200 /* PUP protocol */
+#define ETHERTYPE_IP 0x0800 /* IP protocol */
+#define ETHERTYPE_ARP 0x0806 /* Addr. resolution protocol */
+#define ETHERTYPE_REVARP 0x8035 /* reverse Addr. resolution protocol */
+#define ETHERTYPE_VLAN 0x8100 /* IEEE 802.1Q VLAN tagging */
+#define ETHERTYPE_IPV6 0x86dd /* IPv6 */
+#define ETHERTYPE_LOOPBACK 0x9000 /* used to test interfaces */
+/* XXX - add more useful types here */
+
+/*
+ * The ETHERTYPE_NTRAILER packet types starting at ETHERTYPE_TRAIL have
+ * (type-ETHERTYPE_TRAIL)*512 bytes of data followed
+ * by an ETHER type (as given above) and then the (variable-length) header.
+ */
+#define ETHERTYPE_TRAIL 0x1000 /* Trailer packet */
+#define ETHERTYPE_NTRAILER 16
+
+#define ETHERMTU (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define ETHERMIN (ETHER_MIN_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
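+/* i.e. ETHERMTU = 1518 - 14 - 4 = 1500 and ETHERMIN = 64 - 14 - 4 = 46 */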
+
+#ifndef KERNEL
+#include <sys/cdefs.h>
+
+/*
+ * Ethernet address conversion/parsing routines.
+ */
+__BEGIN_DECLS
+struct ether_addr *ether_aton __P((char *));
+int ether_hostton __P((char *, struct ether_addr *));
+int ether_line __P((char *, struct ether_addr *, char *));
+char *ether_ntoa __P((struct ether_addr *));
+int ether_ntohost __P((char *, struct ether_addr *));
+__END_DECLS
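+
+/*
+ * Example (userland, illustrative):
+ *	struct ether_addr *ea = ether_aton("00:a0:c9:12:34:56");
+ *	if (ea != NULL)
+ *		printf("%s\n", ether_ntoa(ea));
+ */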
+#endif /* !KERNEL */
+
+#endif /* !_NET_ETHERNET_H_ */
diff --git a/sys/net/fddi.h b/sys/net/fddi.h
new file mode 100644
index 0000000..fc6acc6
--- /dev/null
+++ b/sys/net/fddi.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1995 Matt Thomas (thomas@lkg.dec.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_fddi.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IF_FDDI_H_
+#define _NETINET_IF_FDDI_H_
+
+/*
+ * Structure of an 100Mb/s FDDI header.
+ */
+struct fddi_header {
+ u_char fddi_fc;
+ u_char fddi_dhost[6];
+ u_char fddi_shost[6];
+};
+
+#define FDDIIPMTU 4352
+#define FDDIMTU 4470
+#define FDDIMIN 3
+
+#define FDDIFC_C 0x80 /* 0b10000000 */
+#define FDDIFC_L 0x40 /* 0b01000000 */
+#define FDDIFC_F 0x30 /* 0b00110000 */
+#define FDDIFC_Z 0x0F /* 0b00001111 */
+
+#define FDDIFC_LLC_ASYNC 0x50
+#define FDDIFC_LLC_PRIO0 0
+#define FDDIFC_LLC_PRIO1 1
+#define FDDIFC_LLC_PRIO2 2
+#define FDDIFC_LLC_PRIO3 3
+#define FDDIFC_LLC_PRIO4 4
+#define FDDIFC_LLC_PRIO5 5
+#define FDDIFC_LLC_PRIO6 6
+#define FDDIFC_LLC_PRIO7 7
+#define FDDIFC_LLC_SYNC 0xd0
+#define FDDIFC_SMT 0x40
+
+#if defined(KERNEL) || defined(_KERNEL)
+#define fddibroadcastaddr etherbroadcastaddr
+#define fddi_ipmulticast_min ether_ipmulticast_min
+#define fddi_ipmulticast_max ether_ipmulticast_max
+#define fddi_addmulti ether_addmulti
+#define fddi_delmulti ether_delmulti
+#define fddi_sprintf ether_sprintf
+
+void fddi_ifattach __P((struct ifnet *));
+void fddi_input __P((struct ifnet *, struct fddi_header *, struct mbuf *));
+int fddi_output __P((struct ifnet *,
+ struct mbuf *, struct sockaddr *, struct rtentry *));
+
+#endif
+
+#endif
diff --git a/sys/net/hostcache.c b/sys/net/hostcache.c
new file mode 100644
index 0000000..0a1d4c0
--- /dev/null
+++ b/sys/net/hostcache.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 1997 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/socket.h>
+
+#include <net/hostcache.h>
+#include <net/route.h>
+
+MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "per-host cache structure");
+
+static struct hctable hctable[AF_MAX];
+static int hc_timeout_interval = 120;
+static int hc_maxidle = 1800;
+
+static int cmpsa(const struct sockaddr *sa1, const struct sockaddr *sa2);
+static void hc_timeout(void *xhct);
+static void maybe_bump_hash(struct hctable *hct);
+
+int
+hc_init(int af, struct hccallback *hccb, int init_nelem, int primes)
+{
+ struct hctable *hct;
+ struct hchead *heads;
+ u_long nelem;
+
+ hct = &hctable[af];
+ nelem = init_nelem;
+ if (hct->hct_nentries)
+ return 0;
+
+ if (primes) {
+ heads = phashinit(init_nelem, M_HOSTCACHE, &nelem);
+ } else {
+ int i;
+ MALLOC(heads, struct hchead *, nelem * sizeof *heads,
+ M_HOSTCACHE, M_WAITOK);
+ for (i = 0; i < nelem; i++) {
+ LIST_INIT(&heads[i]);
+ }
+ }
+
+ hct->hct_heads = heads;
+ hct->hct_nentries = nelem;
+ hct->hct_primes = primes;
+ timeout(hc_timeout, hct, hc_timeout_interval * hz);
+ return 0;
+}
+
+struct hcentry *
+hc_get(struct sockaddr *sa)
+{
+ u_long hash;
+ struct hcentry *hc;
+ struct hctable *hct;
+ int s;
+
+ hct = &hctable[sa->sa_family];
+ if (hct->hct_nentries == 0)
+ return 0;
+ hash = hct->hct_cb->hccb_hash(sa, hct->hct_nentries);
+ hc = hct->hct_heads[hash].lh_first;
+ for (; hc; hc = hc->hc_link.le_next) {
+ if (cmpsa(hc->hc_host, sa) == 0)
+ break;
+ }
+ if (hc == 0)
+ return 0;
+ s = splnet();
+ if (hc->hc_rt && (hc->hc_rt->rt_flags & RTF_UP) == 0) {
+ RTFREE(hc->hc_rt);
+ hc->hc_rt = 0;
+ }
+ if (hc->hc_rt == 0) {
+ hc->hc_rt = rtalloc1(hc->hc_host, 1, 0);
+ }
+ hc_ref(hc);
+ splx(s);
+ /* XXX move to front of list? */
+ return hc;
+}
+
+void
+hc_ref(struct hcentry *hc)
+{
+ int s = splnet();
+ if (hc->hc_refcnt++ == 0) {
+ hc->hc_hct->hct_idle--;
+ hc->hc_hct->hct_active++;
+ }
+ splx(s);
+}
+
+void
+hc_rele(struct hcentry *hc)
+{
+ int s = splnet();
+#ifdef DIAGNOSTIC
+	if (hc->hc_refcnt <= 0)
+		printf("hc_rele: %p: negative refcnt!\n", (void *)hc);
+#endif
+ hc->hc_refcnt--;
+ if (hc->hc_refcnt == 0) {
+ hc->hc_hct->hct_idle++;
+ hc->hc_hct->hct_active--;
+ hc->hc_idlesince = mono_time; /* XXX right one? */
+ }
+ splx(s);
+}
+
+/*
+ * The user is expected to initialize hc_host with the address and everything
+ * else to the appropriate form of `0'.
+ */
+int
+hc_insert(struct hcentry *hc)
+{
+ struct hcentry *hc2;
+ struct hctable *hct;
+ u_long hash;
+ int s;
+
+ hct = &hctable[hc->hc_host->sa_family];
+ hash = hct->hct_cb->hccb_hash(hc->hc_host, hct->hct_nentries);
+
+ hc2 = hct->hct_heads[hash].lh_first;
+ for (; hc2; hc2 = hc2->hc_link.le_next) {
+ if (cmpsa(hc2->hc_host, hc->hc_host) == 0)
+ break;
+ }
+ if (hc2 != 0)
+ return EEXIST;
+ hc->hc_hct = hct;
+ s = splnet();
+ LIST_INSERT_HEAD(&hct->hct_heads[hash], hc, hc_link);
+ hct->hct_idle++;
+ /*
+ * If the table is now more than 75% full, consider bumping it.
+ */
+ if (100 * (hct->hct_idle + hct->hct_active) > 75 * hct->hct_nentries)
+ maybe_bump_hash(hct);
+ splx(s);
+ return 0;
+}
+
+/*
+ * It's not clear to me how much sense this makes as an external interface,
+ * since it is expected that the deletion will normally be handled by
+ * the cache timeout.
+ */
+int
+hc_delete(struct hcentry *hc)
+{
+ struct hctable *hct;
+ int error, s;
+
+ if (hc->hc_refcnt > 0)
+ return 0;
+
+ hct = hc->hc_hct;
+ error = hct->hct_cb->hccb_delete(hc);
+ if (error)
+ return 0;
+
+ s = splnet();
+ LIST_REMOVE(hc, hc_link);
+ hc->hc_hct->hct_idle--;
+ splx(s);
+ FREE(hc, M_HOSTCACHE);
+ return 0;
+}
+
+static void
+hc_timeout(void *xhct)
+{
+ struct hcentry *hc;
+ struct hctable *hct;
+ int j, s;
+ time_t start;
+
+ hct = xhct;
+ start = mono_time.tv_sec; /* for simplicity */
+
+ if (hct->hct_idle == 0)
+ return;
+ for (j = 0; j < hct->hct_nentries; j++) {
+ for (hc = hct->hct_heads[j].lh_first; hc;
+ hc = hc->hc_link.le_next) {
+ if (hc->hc_refcnt > 0)
+ continue;
+ if (hc->hc_idlesince.tv_sec + hc_maxidle <= start) {
+ if (hct->hct_cb->hccb_delete(hc))
+ continue;
+ s = splnet();
+ LIST_REMOVE(hc, hc_link);
+ hct->hct_idle--;
+ splx(s);
+ }
+ }
+ }
+ /*
+ * Fiddle something here based on tot_idle...
+ */
+ timeout(hc_timeout, xhct, hc_timeout_interval * hz);
+}
+
+static int
+cmpsa(const struct sockaddr *sa1, const struct sockaddr *sa2)
+{
+ if (sa1->sa_len != sa2->sa_len)
+ return ((int)sa1->sa_len - sa2->sa_len);
+ return bcmp(sa1, sa2, sa1->sa_len);
+}
+
+static void
+maybe_bump_hash(struct hctable *hct)
+{
+ ; /* XXX fill me in */
+}
diff --git a/sys/net/hostcache.h b/sys/net/hostcache.h
new file mode 100644
index 0000000..6ffb9ac
--- /dev/null
+++ b/sys/net/hostcache.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 1997 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_HOSTCACHE_H
+#define _NET_HOSTCACHE_H 1
+
+/*
+ * This file defines the interface between network protocols and
+ * the cache of host-specific information maintained by the kernel.
+ * The generic interface takes care of inserting and deleting entries,
+ * maintaining mutual exclusion, and enforcing policy constraints on the
+ * size of the cache and the maximum age of its entries.
+ * It replaces an earlier scheme which overloaded the routing table
+ * for this purpose, and should be significantly more efficient
+ * at performing most operations. (It does keep a route to each
+ * entry in the cache.) Most protocols will want to define a
+ * structure which begins with `struct hcentry' so that they
+ * can keep additional, protocol-specific information in it.
+ */
+
+#include <sys/queue.h>
+
+struct hcentry {
+ LIST_ENTRY(hcentry) hc_link;
+ struct timeval hc_idlesince; /* time last ref dropped */
+ struct sockaddr *hc_host; /* address of this entry's host */
+ struct rtentry *hc_rt; /* route to get there */
+ /* struct nexthop *hc_nh; */
+ int hc_refcnt; /* reference count */
+ struct hctable *hc_hct; /* back ref to table */
+};
+
+struct hccallback {
+ u_long (*hccb_hash)(struct sockaddr *, u_long);
+ int (*hccb_delete)(struct hcentry *);
+ u_long (*hccb_bump)(u_long);
+};
+
+LIST_HEAD(hchead, hcentry);
+
+struct hctable {
+ u_long hct_nentries;
+ u_long hct_active;
+ u_long hct_idle;
+ struct hchead *hct_heads;
+ struct hccallback *hct_cb;
+ int hct_primes;
+};
+
+#ifdef KERNEL
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_HOSTCACHE);
+#endif
+/*
+ * The table-modification functions must be called from user mode, as
+ * they may block waiting for memory and/or locks.
+ */
+int hc_init(int af, struct hccallback *hccb, int init_nelem, int primes);
+struct hcentry *hc_get(struct sockaddr *sa);
+void hc_ref(struct hcentry *hc);
+void hc_rele(struct hcentry *hc);
+int hc_insert(struct hcentry *hc);
+int hc_delete(struct hcentry *hc);
+#endif /* KERNEL */
+
+#endif /* _NET_HOSTCACHE_H */
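As the comment at the top of this header suggests, a protocol embeds struct
hcentry at the front of its own entry and registers a struct hccallback.
A hedged kernel-side sketch, as an editorial illustration rather than part of
the patch (AF_FOO and every foo_* name are hypothetical, and <sys/param.h>,
<sys/socket.h> and <net/hostcache.h> are assumed to be in scope):

/* A hypothetical protocol-private cache entry; the generic part comes first. */
struct foo_hcentry {
	struct hcentry	foo_hc;		/* must be the first member */
	u_long		foo_srtt;	/* protocol-specific state */
};

static u_long
foo_hash(struct sockaddr *sa, u_long nbuckets)
{
	u_long h = 0;
	int i;

	/* Trivial example hash over the raw sockaddr bytes. */
	for (i = 0; i < sa->sa_len; i++)
		h = (h << 5) + h + ((u_char *)sa)[i];
	return (h % nbuckets);
}

static int
foo_delete(struct hcentry *hc)
{
	return (0);			/* no protocol-specific teardown */
}

static struct hccallback foo_hccb = { foo_hash, foo_delete, NULL };

/*
 * Called once at protocol attach time, from process context:
 *	hc_init(AF_FOO, &foo_hccb, 128, 1);
 * Entries are then added with hc_insert() and looked up with hc_get();
 * each successful hc_get() (or explicit hc_ref()) is balanced by hc_rele().
 */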
diff --git a/sys/net/if.c b/sys/net/if.c
new file mode 100644
index 0000000..afbad05
--- /dev/null
+++ b/sys/net/if.c
@@ -0,0 +1,1300 @@
+/*
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if.c 8.3 (Berkeley) 1/4/94
+ * $FreeBSD$
+ */
+
+#include "opt_compat.h"
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/protosw.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/syslog.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/radix.h>
+#include <net/route.h>
+
+#ifdef INET6
+/*XXX*/
+#include <netinet/in.h>
+#endif
+
+/*
+ * System initialization
+ */
+
+static int ifconf __P((u_long, caddr_t));
+static void ifinit __P((void *));
+static void if_qflush __P((struct ifqueue *));
+static void if_slowtimo __P((void *));
+static void link_rtrequest __P((int, struct rtentry *, struct sockaddr *));
+static int if_rtdel __P((struct radix_node *, void *));
+
+SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
+
+MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
+MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
+
+int ifqmaxlen = IFQ_MAXLEN;
+struct ifnethead ifnet; /* depend on static init XXX */
+
+#ifdef INET6
+/*
+ * XXX: declared here to avoid including many inet6-related files;
+ * should be more generalized?
+ */
+extern void nd6_setmtu __P((struct ifnet *));
+#endif
+
+/*
+ * Network interface utility routines.
+ *
+ * Routines with ifa_ifwith* names take sockaddr *'s as
+ * parameters.
+ */
+/* ARGSUSED*/
+void
+ifinit(dummy)
+ void *dummy;
+{
+ struct ifnet *ifp;
+ int s;
+
+ s = splimp();
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next)
+ if (ifp->if_snd.ifq_maxlen == 0) {
+ printf("%s%d XXX: driver didn't set ifq_maxlen\n",
+ ifp->if_name, ifp->if_unit);
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ }
+ splx(s);
+ if_slowtimo(0);
+}
+
+int if_index = 0;
+struct ifaddr **ifnet_addrs;
+struct ifnet **ifindex2ifnet = NULL;
+
+
+/*
+ * Attach an interface to the
+ * list of "active" interfaces.
+ */
+void
+if_attach(ifp)
+ struct ifnet *ifp;
+{
+ unsigned socksize, ifasize;
+ int namelen, masklen;
+ char workbuf[64];
+ register struct sockaddr_dl *sdl;
+ register struct ifaddr *ifa;
+ static int if_indexlim = 8;
+ static int inited;
+
+ if (!inited) {
+ TAILQ_INIT(&ifnet);
+ inited = 1;
+ }
+
+ TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
+ ifp->if_index = ++if_index;
+ /*
+ * XXX -
+ * The old code would work if the interface passed a pre-existing
+ * chain of ifaddrs to this code. We don't trust our callers to
+ * properly initialize the tailq, however, so we no longer allow
+ * this unlikely case.
+ */
+ TAILQ_INIT(&ifp->if_addrhead);
+ TAILQ_INIT(&ifp->if_prefixhead);
+ LIST_INIT(&ifp->if_multiaddrs);
+ getmicrotime(&ifp->if_lastchange);
+ if (ifnet_addrs == 0 || if_index >= if_indexlim) {
+ unsigned n = (if_indexlim <<= 1) * sizeof(ifa);
+ caddr_t q = malloc(n, M_IFADDR, M_WAITOK);
+ bzero(q, n);
+ if (ifnet_addrs) {
+ bcopy((caddr_t)ifnet_addrs, (caddr_t)q, n/2);
+ free((caddr_t)ifnet_addrs, M_IFADDR);
+ }
+ ifnet_addrs = (struct ifaddr **)q;
+
+ /* grow ifindex2ifnet */
+ n = if_indexlim * sizeof(struct ifnet *);
+ q = malloc(n, M_IFADDR, M_WAITOK);
+ bzero(q, n);
+ if (ifindex2ifnet) {
+ bcopy((caddr_t)ifindex2ifnet, q, n/2);
+ free((caddr_t)ifindex2ifnet, M_IFADDR);
+ }
+ ifindex2ifnet = (struct ifnet **)q;
+ }
+
+ ifindex2ifnet[if_index] = ifp;
+
+ /*
+ * create a Link Level name for this device
+ */
+ namelen = snprintf(workbuf, sizeof(workbuf),
+ "%s%d", ifp->if_name, ifp->if_unit);
+#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
+ masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
+ socksize = masklen + ifp->if_addrlen;
+#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
+ if (socksize < sizeof(*sdl))
+ socksize = sizeof(*sdl);
+ socksize = ROUNDUP(socksize);
+ ifasize = sizeof(*ifa) + 2 * socksize;
+ ifa = (struct ifaddr *)malloc(ifasize, M_IFADDR, M_WAITOK);
+ if (ifa) {
+ bzero((caddr_t)ifa, ifasize);
+ sdl = (struct sockaddr_dl *)(ifa + 1);
+ sdl->sdl_len = socksize;
+ sdl->sdl_family = AF_LINK;
+ bcopy(workbuf, sdl->sdl_data, namelen);
+ sdl->sdl_nlen = namelen;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = ifp->if_type;
+ ifnet_addrs[if_index - 1] = ifa;
+ ifa->ifa_ifp = ifp;
+ ifa->ifa_rtrequest = link_rtrequest;
+ ifa->ifa_addr = (struct sockaddr *)sdl;
+ sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
+ ifa->ifa_netmask = (struct sockaddr *)sdl;
+ sdl->sdl_len = masklen;
+ while (namelen != 0)
+ sdl->sdl_data[--namelen] = 0xff;
+ TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
+ }
+}
+
+/*
+ * Detach an interface, removing it from the
+ * list of "active" interfaces.
+ */
+void
+if_detach(ifp)
+ struct ifnet *ifp;
+{
+ struct ifaddr *ifa;
+ struct radix_node_head *rnh;
+ int s;
+ int i;
+
+ /*
+ * Remove routes and flush queues.
+ */
+ s = splnet();
+ if_down(ifp);
+
+ /*
+ * Remove address from ifnet_addrs[] and maybe decrement if_index.
+ * Clean up all addresses.
+ */
+ ifnet_addrs[ifp->if_index - 1] = 0;
+ while (if_index > 0 && ifnet_addrs[if_index - 1] == 0)
+ if_index--;
+
+ for (ifa = TAILQ_FIRST(&ifp->if_addrhead); ifa;
+ ifa = TAILQ_FIRST(&ifp->if_addrhead)) {
+ /* XXX: Ugly!! ad hoc just for INET */
+ if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
+ struct ifaliasreq ifr;
+
+ bzero(&ifr, sizeof(ifr));
+ if (ifa->ifa_addr)
+ ifr.ifra_addr = *ifa->ifa_addr;
+ if (ifa->ifa_dstaddr)
+ ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
+ if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
+ NULL) == 0)
+ continue;
+ }
+ TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
+ IFAFREE(ifa);
+ }
+
+ /*
+	 * Delete all remaining routes using this interface.
+	 * Unfortunately, the only way to do this is to slog through
+ * the entire routing table looking for routes which point
+ * to this interface...oh well...
+ */
+ for (i = 1; i <= AF_MAX; i++) {
+ if ((rnh = rt_tables[i]) == NULL)
+ continue;
+ (void) rnh->rnh_walktree(rnh, if_rtdel, ifp);
+ }
+
+ TAILQ_REMOVE(&ifnet, ifp, if_link);
+ splx(s);
+}
+
+/*
+ * Delete Routes for a Network Interface
+ *
+ * Called for each routing entry via the rnh->rnh_walktree() call above
+ * to delete all route entries referencing a detaching network interface.
+ *
+ * Arguments:
+ * rn pointer to node in the routing table
+ * arg argument passed to rnh->rnh_walktree() - detaching interface
+ *
+ * Returns:
+ * 0 successful
+ * errno failed - reason indicated
+ *
+ */
+static int
+if_rtdel(rn, arg)
+ struct radix_node *rn;
+ void *arg;
+{
+ struct rtentry *rt = (struct rtentry *)rn;
+ struct ifnet *ifp = arg;
+ int err;
+
+ if (rt->rt_ifp == ifp) {
+
+ /*
+ * Protect (sorta) against walktree recursion problems
+ * with cloned routes
+ */
+ if ((rt->rt_flags & RTF_UP) == 0)
+ return (0);
+
+ err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
+ rt_mask(rt), rt->rt_flags,
+ (struct rtentry **) NULL);
+ if (err) {
+ log(LOG_WARNING, "if_rtdel: error %d\n", err);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Locate an interface based on a complete address.
+ */
+/*ARGSUSED*/
+struct ifaddr *
+ifa_ifwithaddr(addr)
+ register struct sockaddr *addr;
+{
+ register struct ifnet *ifp;
+ register struct ifaddr *ifa;
+
+#define equal(a1, a2) \
+ (bcmp((caddr_t)(a1), (caddr_t)(a2), ((struct sockaddr *)(a1))->sa_len) == 0)
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next)
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next) {
+ if (ifa->ifa_addr->sa_family != addr->sa_family)
+ continue;
+ if (equal(addr, ifa->ifa_addr))
+ return (ifa);
+ if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr &&
+ /* IP6 doesn't have broadcast */
+ ifa->ifa_broadaddr->sa_len != 0 &&
+ equal(ifa->ifa_broadaddr, addr))
+ return (ifa);
+ }
+ return ((struct ifaddr *)0);
+}
+/*
+ * Locate the point to point interface with a given destination address.
+ */
+/*ARGSUSED*/
+struct ifaddr *
+ifa_ifwithdstaddr(addr)
+ register struct sockaddr *addr;
+{
+ register struct ifnet *ifp;
+ register struct ifaddr *ifa;
+
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next)
+ if (ifp->if_flags & IFF_POINTOPOINT)
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next) {
+ if (ifa->ifa_addr->sa_family != addr->sa_family)
+ continue;
+ if (ifa->ifa_dstaddr && equal(addr, ifa->ifa_dstaddr))
+ return (ifa);
+ }
+ return ((struct ifaddr *)0);
+}
+
+/*
+ * Find an interface on a specific network. If many, choice
+ * is most specific found.
+ */
+struct ifaddr *
+ifa_ifwithnet(addr)
+ struct sockaddr *addr;
+{
+ register struct ifnet *ifp;
+ register struct ifaddr *ifa;
+ struct ifaddr *ifa_maybe = (struct ifaddr *) 0;
+ u_int af = addr->sa_family;
+ char *addr_data = addr->sa_data, *cplim;
+
+ /*
+ * AF_LINK addresses can be looked up directly by their index number,
+ * so do that if we can.
+ */
+ if (af == AF_LINK) {
+ register struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
+ if (sdl->sdl_index && sdl->sdl_index <= if_index)
+ return (ifnet_addrs[sdl->sdl_index - 1]);
+ }
+
+ /*
+	 * Scan through each interface, looking for ones that have
+ * addresses in this address family.
+ */
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) {
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next) {
+ register char *cp, *cp2, *cp3;
+
+ if (ifa->ifa_addr->sa_family != af)
+next: continue;
+ if (
+#ifdef INET6 /* XXX: for matching gif tunnel dst as routing entry gateway */
+ addr->sa_family != AF_INET6 &&
+#endif
+ ifp->if_flags & IFF_POINTOPOINT) {
+ /*
+ * This is a bit broken as it doesn't
+ * take into account that the remote end may
+ * be a single node in the network we are
+ * looking for.
+ * The trouble is that we don't know the
+ * netmask for the remote end.
+ */
+ if (ifa->ifa_dstaddr != 0
+ && equal(addr, ifa->ifa_dstaddr))
+ return (ifa);
+ } else {
+ /*
+ * if we have a special address handler,
+ * then use it instead of the generic one.
+ */
+ if (ifa->ifa_claim_addr) {
+ if ((*ifa->ifa_claim_addr)(ifa, addr)) {
+ return (ifa);
+ } else {
+ continue;
+ }
+ }
+
+ /*
+ * Scan all the bits in the ifa's address.
+				 * If a bit disagrees with what we are
+ * looking for, mask it with the netmask
+ * to see if it really matters.
+ * (A byte at a time)
+ */
+ if (ifa->ifa_netmask == 0)
+ continue;
+ cp = addr_data;
+ cp2 = ifa->ifa_addr->sa_data;
+ cp3 = ifa->ifa_netmask->sa_data;
+ cplim = ifa->ifa_netmask->sa_len
+ + (char *)ifa->ifa_netmask;
+ while (cp3 < cplim)
+ if ((*cp++ ^ *cp2++) & *cp3++)
+ goto next; /* next address! */
+ /*
+ * If the netmask of what we just found
+ * is more specific than what we had before
+ * (if we had one) then remember the new one
+ * before continuing to search
+ * for an even better one.
+ */
+ if (ifa_maybe == 0 ||
+ rn_refines((caddr_t)ifa->ifa_netmask,
+ (caddr_t)ifa_maybe->ifa_netmask))
+ ifa_maybe = ifa;
+ }
+ }
+ }
+ return (ifa_maybe);
+}
+
+/*
+ * Find an interface address specific to an interface best matching
+ * a given address.
+ */
+struct ifaddr *
+ifaof_ifpforaddr(addr, ifp)
+ struct sockaddr *addr;
+ register struct ifnet *ifp;
+{
+ register struct ifaddr *ifa;
+ register char *cp, *cp2, *cp3;
+ register char *cplim;
+ struct ifaddr *ifa_maybe = 0;
+ u_int af = addr->sa_family;
+
+ if (af >= AF_MAX)
+ return (0);
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next) {
+ if (ifa->ifa_addr->sa_family != af)
+ continue;
+ if (ifa_maybe == 0)
+ ifa_maybe = ifa;
+ if (ifa->ifa_netmask == 0) {
+ if (equal(addr, ifa->ifa_addr) ||
+ (ifa->ifa_dstaddr && equal(addr, ifa->ifa_dstaddr)))
+ return (ifa);
+ continue;
+ }
+ if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (equal(addr, ifa->ifa_dstaddr))
+ return (ifa);
+ } else {
+ cp = addr->sa_data;
+ cp2 = ifa->ifa_addr->sa_data;
+ cp3 = ifa->ifa_netmask->sa_data;
+ cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
+ for (; cp3 < cplim; cp3++)
+ if ((*cp++ ^ *cp2++) & *cp3)
+ break;
+ if (cp3 == cplim)
+ return (ifa);
+ }
+ }
+ return (ifa_maybe);
+}
+
+#include <net/route.h>
+
+/*
+ * Default action when installing a route with a Link Level gateway.
+ * Lookup an appropriate real ifa to point to.
+ * This should be moved to /sys/net/link.c eventually.
+ */
+static void
+link_rtrequest(cmd, rt, sa)
+ int cmd;
+ register struct rtentry *rt;
+ struct sockaddr *sa;
+{
+ register struct ifaddr *ifa;
+ struct sockaddr *dst;
+ struct ifnet *ifp;
+
+ if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) ||
+ ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0))
+ return;
+ ifa = ifaof_ifpforaddr(dst, ifp);
+ if (ifa) {
+ IFAFREE(rt->rt_ifa);
+ rt->rt_ifa = ifa;
+ ifa->ifa_refcnt++;
+ if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
+ ifa->ifa_rtrequest(cmd, rt, sa);
+ }
+}
+
+/*
+ * Mark an interface down and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+void
+if_unroute(ifp, flag, fam)
+ register struct ifnet *ifp;
+ int flag, fam;
+{
+ register struct ifaddr *ifa;
+
+ ifp->if_flags &= ~flag;
+ getmicrotime(&ifp->if_lastchange);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
+ pfctlinput(PRC_IFDOWN, ifa->ifa_addr);
+ if_qflush(&ifp->if_snd);
+ rt_ifmsg(ifp);
+}
+
+/*
+ * Mark an interface up and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+void
+if_route(ifp, flag, fam)
+ register struct ifnet *ifp;
+ int flag, fam;
+{
+ register struct ifaddr *ifa;
+
+ ifp->if_flags |= flag;
+ getmicrotime(&ifp->if_lastchange);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
+ pfctlinput(PRC_IFUP, ifa->ifa_addr);
+ rt_ifmsg(ifp);
+#ifdef INET6
+ in6_if_up(ifp);
+#endif
+}
+
+/*
+ * Mark an interface down and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+void
+if_down(ifp)
+ register struct ifnet *ifp;
+{
+
+ if_unroute(ifp, IFF_UP, AF_UNSPEC);
+}
+
+/*
+ * Mark an interface up and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+void
+if_up(ifp)
+ register struct ifnet *ifp;
+{
+
+ if_route(ifp, IFF_UP, AF_UNSPEC);
+}
+
+/*
+ * Flush an interface queue.
+ */
+static void
+if_qflush(ifq)
+ register struct ifqueue *ifq;
+{
+ register struct mbuf *m, *n;
+
+ n = ifq->ifq_head;
+ while ((m = n) != 0) {
+ n = m->m_act;
+ m_freem(m);
+ }
+ ifq->ifq_head = 0;
+ ifq->ifq_tail = 0;
+ ifq->ifq_len = 0;
+}
+
+/*
+ * Handle interface watchdog timer routines. Called
+ * from softclock, we decrement timers (if set) and
+ * call the appropriate interface routine on expiration.
+ */
+static void
+if_slowtimo(arg)
+ void *arg;
+{
+ register struct ifnet *ifp;
+ int s = splimp();
+
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) {
+ if (ifp->if_timer == 0 || --ifp->if_timer)
+ continue;
+ if (ifp->if_watchdog)
+ (*ifp->if_watchdog)(ifp);
+ }
+ splx(s);
+ timeout(if_slowtimo, (void *)0, hz / IFNET_SLOWHZ);
+}
+
+/*
+ * Map interface name to
+ * interface structure pointer.
+ */
+struct ifnet *
+ifunit(char *name)
+{
+ char namebuf[IFNAMSIZ + 1];
+ char *cp;
+ struct ifnet *ifp;
+ int unit;
+ unsigned len, m;
+ char c;
+
+ len = strlen(name);
+ if (len < 2 || len > IFNAMSIZ)
+ return NULL;
+ cp = name + len - 1;
+ c = *cp;
+ if (c < '0' || c > '9')
+ return NULL; /* trailing garbage */
+ unit = 0;
+ m = 1;
+ do {
+ if (cp == name)
+ return NULL; /* no interface name */
+ unit += (c - '0') * m;
+ if (unit > 1000000)
+ return NULL; /* number is unreasonable */
+ m *= 10;
+ c = *--cp;
+ } while (c >= '0' && c <= '9');
+ len = cp - name + 1;
+ bcopy(name, namebuf, len);
+ namebuf[len] = '\0';
+ /*
+ * Now search all the interfaces for this name/number
+ */
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) {
+ if (strcmp(ifp->if_name, namebuf))
+ continue;
+ if (unit == ifp->if_unit)
+ break;
+ }
+ return (ifp);
+}
+
+
+/*
+ * Map interface name in a sockaddr_dl to
+ * interface structure pointer.
+ */
+struct ifnet *
+if_withname(sa)
+ struct sockaddr *sa;
+{
+ char ifname[IFNAMSIZ+1];
+ struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
+
+ if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
+ (sdl->sdl_nlen > IFNAMSIZ) )
+ return NULL;
+
+ /*
+ * ifunit wants a null-terminated name. It may not be null-terminated
+ * in the sockaddr. We don't want to change the caller's sockaddr,
+ * and there might not be room to put the trailing null anyway, so we
+ * make a local copy that we know we can null terminate safely.
+ */
+
+ bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
+ ifname[sdl->sdl_nlen] = '\0';
+ return ifunit(ifname);
+}
+
+
+/*
+ * Interface ioctls.
+ */
+int
+ifioctl(so, cmd, data, p)
+ struct socket *so;
+ u_long cmd;
+ caddr_t data;
+ struct proc *p;
+{
+ register struct ifnet *ifp;
+ register struct ifreq *ifr;
+ struct ifstat *ifs;
+ int error;
+ short oif_flags;
+
+ switch (cmd) {
+
+ case SIOCGIFCONF:
+ case OSIOCGIFCONF:
+ return (ifconf(cmd, data));
+ }
+ ifr = (struct ifreq *)data;
+ ifp = ifunit(ifr->ifr_name);
+ if (ifp == 0)
+ return (ENXIO);
+ switch (cmd) {
+
+ case SIOCGIFFLAGS:
+ ifr->ifr_flags = ifp->if_flags;
+ break;
+
+ case SIOCGIFMETRIC:
+ ifr->ifr_metric = ifp->if_metric;
+ break;
+
+ case SIOCGIFMTU:
+ ifr->ifr_mtu = ifp->if_mtu;
+ break;
+
+ case SIOCGIFPHYS:
+ ifr->ifr_phys = ifp->if_physical;
+ break;
+
+ case SIOCSIFFLAGS:
+ error = suser(p);
+ if (error)
+ return (error);
+ ifr->ifr_prevflags = ifp->if_flags;
+ if (ifp->if_flags & IFF_SMART) {
+ /* Smart drivers twiddle their own routes */
+ } else if (ifp->if_flags & IFF_UP &&
+ (ifr->ifr_flags & IFF_UP) == 0) {
+ int s = splimp();
+ if_down(ifp);
+ splx(s);
+ } else if (ifr->ifr_flags & IFF_UP &&
+ (ifp->if_flags & IFF_UP) == 0) {
+ int s = splimp();
+ if_up(ifp);
+ splx(s);
+ }
+ ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
+ (ifr->ifr_flags &~ IFF_CANTCHANGE);
+ if (ifp->if_ioctl)
+ (void) (*ifp->if_ioctl)(ifp, cmd, data);
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+ case SIOCSIFMETRIC:
+ error = suser(p);
+ if (error)
+ return (error);
+ ifp->if_metric = ifr->ifr_metric;
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+ case SIOCSIFPHYS:
+ error = suser(p);
+ if (error)
+ return error;
+ if (!ifp->if_ioctl)
+ return EOPNOTSUPP;
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ return(error);
+
+ case SIOCSIFMTU:
+ {
+ u_long oldmtu = ifp->if_mtu;
+
+ error = suser(p);
+ if (error)
+ return (error);
+ if (ifp->if_ioctl == NULL)
+ return (EOPNOTSUPP);
+ if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
+ return (EINVAL);
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ /*
+ * If the link MTU changed, do network layer specific procedure.
+ */
+ if (ifp->if_mtu != oldmtu) {
+#ifdef INET6
+ nd6_setmtu(ifp);
+#endif
+ }
+ return (error);
+ }
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ error = suser(p);
+ if (error)
+ return (error);
+
+ /* Don't allow group membership on non-multicast interfaces. */
+ if ((ifp->if_flags & IFF_MULTICAST) == 0)
+ return EOPNOTSUPP;
+
+ /* Don't let users screw up protocols' entries. */
+ if (ifr->ifr_addr.sa_family != AF_LINK)
+ return EINVAL;
+
+ if (cmd == SIOCADDMULTI) {
+ struct ifmultiaddr *ifma;
+ error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
+ } else {
+ error = if_delmulti(ifp, &ifr->ifr_addr);
+ }
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ return error;
+
+ case SIOCSIFMEDIA:
+ case SIOCSIFGENERIC:
+ error = suser(p);
+ if (error)
+ return (error);
+ if (ifp->if_ioctl == 0)
+ return (EOPNOTSUPP);
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ return error;
+
+ case SIOCGIFSTATUS:
+ ifs = (struct ifstat *)data;
+ ifs->ascii[0] = '\0';
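+		/* FALLTHROUGH to the if_ioctl call below */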
+
+ case SIOCGIFMEDIA:
+ case SIOCGIFGENERIC:
+ if (ifp->if_ioctl == 0)
+ return (EOPNOTSUPP);
+ return ((*ifp->if_ioctl)(ifp, cmd, data));
+
+ default:
+ oif_flags = ifp->if_flags;
+ if (so->so_proto == 0)
+ return (EOPNOTSUPP);
+#ifndef COMPAT_43
+ error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd,
+ data,
+ ifp, p));
+#else
+ {
+ int ocmd = cmd;
+
+ switch (cmd) {
+
+ case SIOCSIFDSTADDR:
+ case SIOCSIFADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+#if BYTE_ORDER != BIG_ENDIAN
+ if (ifr->ifr_addr.sa_family == 0 &&
+ ifr->ifr_addr.sa_len < 16) {
+ ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
+ ifr->ifr_addr.sa_len = 16;
+ }
+#else
+ if (ifr->ifr_addr.sa_len == 0)
+ ifr->ifr_addr.sa_len = 16;
+#endif
+ break;
+
+ case OSIOCGIFADDR:
+ cmd = SIOCGIFADDR;
+ break;
+
+ case OSIOCGIFDSTADDR:
+ cmd = SIOCGIFDSTADDR;
+ break;
+
+ case OSIOCGIFBRDADDR:
+ cmd = SIOCGIFBRDADDR;
+ break;
+
+ case OSIOCGIFNETMASK:
+ cmd = SIOCGIFNETMASK;
+ }
+ error = ((*so->so_proto->pr_usrreqs->pru_control)(so,
+ cmd,
+ data,
+ ifp, p));
+ switch (ocmd) {
+
+ case OSIOCGIFADDR:
+ case OSIOCGIFDSTADDR:
+ case OSIOCGIFBRDADDR:
+ case OSIOCGIFNETMASK:
+ *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
+
+ }
+ }
+#endif /* COMPAT_43 */
+
+ if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
+#ifdef INET6
+ if (ifp->if_flags & IFF_UP) {
+ int s = splimp();
+ in6_if_up(ifp);
+ splx(s);
+ }
+#endif
+ }
+ return (error);
+
+ }
+ return (0);
+}
+
+/*
+ * Set/clear promiscuous mode on interface ifp based on the truth value
+ * of pswitch. The calls are reference counted so that only the first
+ * "on" request actually has an effect, as does the final "off" request.
+ * Results are undefined if the "off" and "on" requests are not matched.
+ */
+int
+ifpromisc(ifp, pswitch)
+ struct ifnet *ifp;
+ int pswitch;
+{
+ struct ifreq ifr;
+ int error;
+
+ if (pswitch) {
+ /*
+ * If the device is not configured up, we cannot put it in
+ * promiscuous mode.
+ */
+ if ((ifp->if_flags & IFF_UP) == 0)
+ return (ENETDOWN);
+ if (ifp->if_pcount++ != 0)
+ return (0);
+ ifp->if_flags |= IFF_PROMISC;
+ log(LOG_INFO, "%s%d: promiscuous mode enabled\n",
+ ifp->if_name, ifp->if_unit);
+ } else {
+ if (--ifp->if_pcount > 0)
+ return (0);
+ ifp->if_flags &= ~IFF_PROMISC;
+ log(LOG_INFO, "%s%d: promiscuous mode disabled\n",
+ ifp->if_name, ifp->if_unit);
+ }
+ ifr.ifr_flags = ifp->if_flags;
+ error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
+ if (error == 0)
+ rt_ifmsg(ifp);
+ return error;
+}
+
+/*
+ * Return interface configuration
+ * of system. List may be used
+ * in later ioctl's (above) to get
+ * other information.
+ */
+/*ARGSUSED*/
+static int
+ifconf(cmd, data)
+ u_long cmd;
+ caddr_t data;
+{
+ register struct ifconf *ifc = (struct ifconf *)data;
+ register struct ifnet *ifp = ifnet.tqh_first;
+ register struct ifaddr *ifa;
+ struct ifreq ifr, *ifrp;
+ int space = ifc->ifc_len, error = 0;
+
+ ifrp = ifc->ifc_req;
+ for (; space > sizeof (ifr) && ifp; ifp = ifp->if_link.tqe_next) {
+ char workbuf[64];
+ int ifnlen, addrs;
+
+ ifnlen = snprintf(workbuf, sizeof(workbuf),
+ "%s%d", ifp->if_name, ifp->if_unit);
+ if(ifnlen + 1 > sizeof ifr.ifr_name) {
+ error = ENAMETOOLONG;
+ } else {
+ strcpy(ifr.ifr_name, workbuf);
+ }
+
+ addrs = 0;
+ ifa = ifp->if_addrhead.tqh_first;
+ for ( ; space > sizeof (ifr) && ifa;
+ ifa = ifa->ifa_link.tqe_next) {
+ register struct sockaddr *sa = ifa->ifa_addr;
+ if (curproc->p_prison && prison_if(curproc, sa))
+ continue;
+ addrs++;
+#ifdef COMPAT_43
+ if (cmd == OSIOCGIFCONF) {
+ struct osockaddr *osa =
+ (struct osockaddr *)&ifr.ifr_addr;
+ ifr.ifr_addr = *sa;
+ osa->sa_family = sa->sa_family;
+ error = copyout((caddr_t)&ifr, (caddr_t)ifrp,
+ sizeof (ifr));
+ ifrp++;
+ } else
+#endif
+ if (sa->sa_len <= sizeof(*sa)) {
+ ifr.ifr_addr = *sa;
+ error = copyout((caddr_t)&ifr, (caddr_t)ifrp,
+ sizeof (ifr));
+ ifrp++;
+ } else {
+ space -= sa->sa_len - sizeof(*sa);
+ if (space < sizeof (ifr))
+ break;
+ error = copyout((caddr_t)&ifr, (caddr_t)ifrp,
+ sizeof (ifr.ifr_name));
+ if (error == 0)
+ error = copyout((caddr_t)sa,
+ (caddr_t)&ifrp->ifr_addr, sa->sa_len);
+ ifrp = (struct ifreq *)
+ (sa->sa_len + (caddr_t)&ifrp->ifr_addr);
+ }
+ if (error)
+ break;
+ space -= sizeof (ifr);
+ }
+ if (!addrs) {
+ bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr));
+ error = copyout((caddr_t)&ifr, (caddr_t)ifrp,
+ sizeof (ifr));
+ if (error)
+ break;
+ space -= sizeof (ifr), ifrp++;
+ }
+ }
+ ifc->ifc_len -= space;
+ return (error);
+}
+
+/*
+ * Just like ifpromisc(), but for all-multicast-reception mode.
+ */
+int
+if_allmulti(ifp, onswitch)
+ struct ifnet *ifp;
+ int onswitch;
+{
+ int error = 0;
+ int s = splimp();
+
+ if (onswitch) {
+ if (ifp->if_amcount++ == 0) {
+ ifp->if_flags |= IFF_ALLMULTI;
+ error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, 0);
+ }
+ } else {
+ if (ifp->if_amcount > 1) {
+ ifp->if_amcount--;
+ } else {
+ ifp->if_amcount = 0;
+ ifp->if_flags &= ~IFF_ALLMULTI;
+ error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, 0);
+ }
+ }
+ splx(s);
+
+ if (error == 0)
+ rt_ifmsg(ifp);
+ return error;
+}
+
+/*
+ * Add a multicast listenership to the interface in question.
+ * The link layer provides a routine (if_resolvemulti) which converts a
+ * protocol address into the corresponding link-level (AF_LINK) address.
+ */
+int
+if_addmulti(ifp, sa, retifma)
+ struct ifnet *ifp; /* interface to manipulate */
+ struct sockaddr *sa; /* address to add */
+ struct ifmultiaddr **retifma;
+{
+ struct sockaddr *llsa, *dupsa;
+ int error, s;
+ struct ifmultiaddr *ifma;
+
+ /*
+ * If the matching multicast address already exists
+ * then don't add a new one, just add a reference
+ */
+ for (ifma = ifp->if_multiaddrs.lh_first; ifma;
+ ifma = ifma->ifma_link.le_next) {
+ if (equal(sa, ifma->ifma_addr)) {
+ ifma->ifma_refcount++;
+ if (retifma)
+ *retifma = ifma;
+ return 0;
+ }
+ }
+
+ /*
+ * Give the link layer a chance to accept/reject it, and also
+ * find out which AF_LINK address this maps to, if it isn't one
+ * already.
+ */
+ if (ifp->if_resolvemulti) {
+ error = ifp->if_resolvemulti(ifp, &llsa, sa);
+ if (error) return error;
+ } else {
+ llsa = 0;
+ }
+
+ MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, M_WAITOK);
+ MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, M_WAITOK);
+ bcopy(sa, dupsa, sa->sa_len);
+
+ ifma->ifma_addr = dupsa;
+ ifma->ifma_lladdr = llsa;
+ ifma->ifma_ifp = ifp;
+ ifma->ifma_refcount = 1;
+ ifma->ifma_protospec = 0;
+ rt_newmaddrmsg(RTM_NEWMADDR, ifma);
+
+ /*
+ * Some network interfaces can scan the address list at
+ * interrupt time; lock them out.
+ */
+ s = splimp();
+ LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
+ splx(s);
+	if (retifma)
+		*retifma = ifma;
+
+ if (llsa != 0) {
+ for (ifma = ifp->if_multiaddrs.lh_first; ifma;
+ ifma = ifma->ifma_link.le_next) {
+ if (equal(ifma->ifma_addr, llsa))
+ break;
+ }
+ if (ifma) {
+ ifma->ifma_refcount++;
+ } else {
+ MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma,
+ M_IFMADDR, M_WAITOK);
+ MALLOC(dupsa, struct sockaddr *, llsa->sa_len,
+ M_IFMADDR, M_WAITOK);
+ bcopy(llsa, dupsa, llsa->sa_len);
+ ifma->ifma_addr = dupsa;
+ ifma->ifma_ifp = ifp;
+ ifma->ifma_refcount = 1;
+ s = splimp();
+ LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
+ splx(s);
+ }
+ }
+ /*
+ * We are certain we have added something, so call down to the
+ * interface to let them know about it.
+ */
+ s = splimp();
+ ifp->if_ioctl(ifp, SIOCADDMULTI, 0);
+ splx(s);
+
+ return 0;
+}
+
+/*
+ * Remove a reference to a multicast address on this interface. Yell
+ * if the request does not match an existing membership.
+ */
+int
+if_delmulti(ifp, sa)
+ struct ifnet *ifp;
+ struct sockaddr *sa;
+{
+ struct ifmultiaddr *ifma;
+ int s;
+
+ for (ifma = ifp->if_multiaddrs.lh_first; ifma;
+ ifma = ifma->ifma_link.le_next)
+ if (equal(sa, ifma->ifma_addr))
+ break;
+ if (ifma == 0)
+ return ENOENT;
+
+ if (ifma->ifma_refcount > 1) {
+ ifma->ifma_refcount--;
+ return 0;
+ }
+
+ rt_newmaddrmsg(RTM_DELMADDR, ifma);
+ sa = ifma->ifma_lladdr;
+ s = splimp();
+ LIST_REMOVE(ifma, ifma_link);
+ splx(s);
+ free(ifma->ifma_addr, M_IFMADDR);
+ free(ifma, M_IFMADDR);
+ if (sa == 0)
+ return 0;
+
+ /*
+ * Now look for the link-layer address which corresponds to
+ * this network address. It had been squirreled away in
+ * ifma->ifma_lladdr for this purpose (so we don't have
+ * to call ifp->if_resolvemulti() again), and we saved that
+ * value in sa above. If some nasty deleted the
+ * link-layer address out from underneath us, we can deal because
+	 * the address we stored is not the same as the one which was
+ * in the record for the link-layer address. (So we don't complain
+ * in that case.)
+ */
+ for (ifma = ifp->if_multiaddrs.lh_first; ifma;
+ ifma = ifma->ifma_link.le_next)
+ if (equal(sa, ifma->ifma_addr))
+ break;
+ if (ifma == 0)
+ return 0;
+
+ if (ifma->ifma_refcount > 1) {
+ ifma->ifma_refcount--;
+ return 0;
+ }
+
+ s = splimp();
+ LIST_REMOVE(ifma, ifma_link);
+ ifp->if_ioctl(ifp, SIOCDELMULTI, 0);
+ splx(s);
+ free(ifma->ifma_addr, M_IFMADDR);
+ free(sa, M_IFMADDR);
+ free(ifma, M_IFMADDR);
+
+ return 0;
+}
+
+struct ifmultiaddr *
+ifmaof_ifpforaddr(sa, ifp)
+ struct sockaddr *sa;
+ struct ifnet *ifp;
+{
+ struct ifmultiaddr *ifma;
+
+ for (ifma = ifp->if_multiaddrs.lh_first; ifma;
+ ifma = ifma->ifma_link.le_next)
+ if (equal(ifma->ifma_addr, sa))
+ break;
+
+ return ifma;
+}
+
+SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
+SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
diff --git a/sys/net/if.h b/sys/net/if.h
new file mode 100644
index 0000000..350e6d8
--- /dev/null
+++ b/sys/net/if.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_H_
+#define _NET_IF_H_
+
+/*
+ * <net/if.h> does not depend on <sys/time.h> on most other systems. This
+ * helps userland compatibility. (struct timeval ifi_lastchange)
+ */
+#ifndef KERNEL
+#include <sys/time.h>
+#endif
+
+/*
+ * Structure describing information about an interface
+ * which may be of interest to management entities.
+ */
+struct if_data {
+ /* generic interface information */
+ u_char ifi_type; /* ethernet, tokenring, etc */
+ u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */
+ u_char ifi_addrlen; /* media address length */
+ u_char ifi_hdrlen; /* media header length */
+ u_char ifi_recvquota; /* polling quota for receive intrs */
+ u_char ifi_xmitquota; /* polling quota for xmit intrs */
+ u_long ifi_mtu; /* maximum transmission unit */
+ u_long ifi_metric; /* routing metric (external only) */
+ u_long ifi_baudrate; /* linespeed */
+ /* volatile statistics */
+ u_long ifi_ipackets; /* packets received on interface */
+ u_long ifi_ierrors; /* input errors on interface */
+ u_long ifi_opackets; /* packets sent on interface */
+ u_long ifi_oerrors; /* output errors on interface */
+ u_long ifi_collisions; /* collisions on csma interfaces */
+ u_long ifi_ibytes; /* total number of octets received */
+ u_long ifi_obytes; /* total number of octets sent */
+ u_long ifi_imcasts; /* packets received via multicast */
+ u_long ifi_omcasts; /* packets sent via multicast */
+ u_long ifi_iqdrops; /* dropped on input, this interface */
+ u_long ifi_noproto; /* destined for unsupported protocol */
+ u_long ifi_recvtiming; /* usec spent receiving when timing */
+ u_long ifi_xmittiming; /* usec spent xmitting when timing */
+ struct timeval ifi_lastchange; /* time of last administrative change */
+};
+
+#define IFF_UP 0x1 /* interface is up */
+#define IFF_BROADCAST 0x2 /* broadcast address valid */
+#define IFF_DEBUG 0x4 /* turn on debugging */
+#define IFF_LOOPBACK 0x8 /* is a loopback net */
+#define IFF_POINTOPOINT 0x10 /* interface is point-to-point link */
+#define IFF_SMART 0x20 /* interface manages own routes */
+#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_NOARP 0x80 /* no address resolution protocol */
+#define IFF_PROMISC 0x100 /* receive all packets */
+#define IFF_ALLMULTI 0x200 /* receive all multicast packets */
+#define IFF_OACTIVE 0x400 /* transmission in progress */
+#define IFF_SIMPLEX 0x800 /* can't hear own transmissions */
+#define IFF_LINK0 0x1000 /* per link layer defined bit */
+#define IFF_LINK1 0x2000 /* per link layer defined bit */
+#define IFF_LINK2 0x4000 /* per link layer defined bit */
+#define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */
+#define IFF_MULTICAST 0x8000 /* supports multicast */
+
+/* flags set internally only: */
+#define IFF_CANTCHANGE \
+ (IFF_BROADCAST|IFF_POINTOPOINT|IFF_RUNNING|IFF_OACTIVE|\
+ IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI|IFF_SMART)
+
+#define IFQ_MAXLEN 50
+#define IFNET_SLOWHZ 1 /* granularity is 1 second */
+
+/*
+ * Message format for use in obtaining information about interfaces
+ * from getkerninfo and the routing socket
+ */
+struct if_msghdr {
+ u_short ifm_msglen; /* to skip over non-understood messages */
+	u_char	ifm_version;	/* future binary compatibility */
+ u_char ifm_type; /* message type */
+ int ifm_addrs; /* like rtm_addrs */
+ int ifm_flags; /* value of if_flags */
+ u_short ifm_index; /* index for associated ifp */
+ struct if_data ifm_data;/* statistics and other data about if */
+};
+
+/*
+ * Message format for use in obtaining information about interface addresses
+ * from getkerninfo and the routing socket
+ */
+struct ifa_msghdr {
+ u_short ifam_msglen; /* to skip over non-understood messages */
+	u_char	ifam_version;	/* future binary compatibility */
+ u_char ifam_type; /* message type */
+ int ifam_addrs; /* like rtm_addrs */
+ int ifam_flags; /* value of ifa_flags */
+ u_short ifam_index; /* index for associated ifp */
+ int ifam_metric; /* value of ifa_metric */
+};
+
+/*
+ * Message format for use in obtaining information about multicast addresses
+ * from the routing socket
+ */
+struct ifma_msghdr {
+ u_short ifmam_msglen; /* to skip over non-understood messages */
+	u_char	ifmam_version;	/* future binary compatibility */
+ u_char ifmam_type; /* message type */
+ int ifmam_addrs; /* like rtm_addrs */
+ int ifmam_flags; /* value of ifa_flags */
+ u_short ifmam_index; /* index for associated ifp */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+struct ifreq {
+#define IFNAMSIZ 16
+#define IF_NAMESIZE IFNAMSIZ
+ char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ short ifru_flags[2];
+ int ifru_metric;
+ int ifru_mtu;
+ int ifru_phys;
+ int ifru_media;
+ caddr_t ifru_data;
+ } ifr_ifru;
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_flags ifr_ifru.ifru_flags[0] /* flags */
+#define ifr_prevflags ifr_ifru.ifru_flags[1] /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_phys ifr_ifru.ifru_phys /* physical wire */
+#define ifr_media ifr_ifru.ifru_media /* physical media */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
+};
+
+#define _SIZEOF_ADDR_IFREQ(ifr) \
+ ((ifr).ifr_addr.sa_len > sizeof(struct sockaddr) ? \
+ (sizeof(struct ifreq) - sizeof(struct sockaddr) + \
+ (ifr).ifr_addr.sa_len) : sizeof(struct ifreq))
+
+struct ifaliasreq {
+ char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ struct sockaddr ifra_addr;
+ struct sockaddr ifra_broadaddr;
+ struct sockaddr ifra_mask;
+};
+
+struct ifmediareq {
+ char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ int ifm_current; /* current media options */
+ int ifm_mask; /* don't care mask */
+ int ifm_status; /* media status */
+ int ifm_active; /* active options */
+ int ifm_count; /* # entries in ifm_ulist array */
+ int *ifm_ulist; /* media words */
+};
+
+/*
+ * Structure used to retrieve aux status data from interfaces.
+ * Kernel suppliers to this interface should respect the formatting
+ * needed by ifconfig(8): each line starts with a TAB and ends with
+ * a newline. The canonical example to copy and paste is in if_tun.c.
+ */
+
+#define IFSTATMAX 800 /* 10 lines of text */
+struct ifstat {
+ char ifs_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ char ascii[IFSTATMAX + 1];
+};
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for a machine (useful for programs which
+ * must know all networks accessible).
+ */
+struct ifconf {
+ int ifc_len; /* size of associated buffer */
+ union {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures returned */
+};
+
+
+/*
+ * Structure for SIOC[AGD]LIFADDR
+ */
+struct if_laddrreq {
+ char iflr_name[IFNAMSIZ];
+ u_int flags;
+#define IFLR_PREFIX 0x8000 /* in: prefix given out: kernel fills id */
+ u_int prefixlen; /* in/out */
+ struct sockaddr_storage addr; /* in/out */
+ struct sockaddr_storage dstaddr; /* out */
+};
+
+#ifdef KERNEL
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_IFADDR);
+MALLOC_DECLARE(M_IFMADDR);
+#endif
+#endif
+
+#ifndef KERNEL
+struct if_nameindex {
+ u_int if_index; /* 1, 2, ... */
+ char *if_name; /* null terminated name: "le0", ... */
+};
+
+__BEGIN_DECLS
+u_int if_nametoindex __P((const char *));
+char *if_indextoname __P((u_int, char *));
+struct if_nameindex *if_nameindex __P((void));
+void if_freenameindex __P((struct if_nameindex *));
+__END_DECLS
+#endif
+
+#ifdef KERNEL
+struct proc;
+
+int prison_if __P((struct proc *p, struct sockaddr *sa));
+
+/* XXX - this should go away soon. */
+#include <net/if_var.h>
+#endif
+
+#endif /* !_NET_IF_H_ */
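The struct ifconf/ifreq pair above is the classic SIOCGIFCONF interface.  A
minimal user-space sketch, as an editorial illustration rather than part of
the patch (buffer size and error handling are deliberately simplified):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>		/* SIOCGIFCONF */
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[8192], *p;
	struct ifconf ifc;
	struct ifreq *ifr;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	ifc.ifc_len = sizeof(buf);
	ifc.ifc_buf = buf;
	if (s < 0 || ioctl(s, SIOCGIFCONF, &ifc) < 0)
		return (1);
	/* Entries are variable-sized; advance by _SIZEOF_ADDR_IFREQ(). */
	p = buf;
	while (p < buf + ifc.ifc_len) {
		ifr = (struct ifreq *)p;
		printf("%s: address family %d\n",
		    ifr->ifr_name, ifr->ifr_addr.sa_family);
		p += _SIZEOF_ADDR_IFREQ(*ifr);
	}
	close(s);
	return (0);
}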
diff --git a/sys/net/if_arp.h b/sys/net/if_arp.h
new file mode 100644
index 0000000..52eeb26
--- /dev/null
+++ b/sys/net/if_arp.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_arp.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_ARP_H_
+#define _NET_IF_ARP_H_
+
+/*
+ * Address Resolution Protocol.
+ *
+ * See RFC 826 for protocol description. ARP packets are variable
+ * in size; the arphdr structure defines the fixed-length portion.
+ * Protocol type values are the same as those for 10 Mb/s Ethernet.
+ * It is followed by the variable-sized fields ar_sha, ar_spa,
+ * ar_tha and ar_tpa in that order, according to the lengths
+ * specified. Field names used correspond to RFC 826.
+ */
+struct arphdr {
+ u_short ar_hrd; /* format of hardware address */
+#define ARPHRD_ETHER 1 /* ethernet hardware format */
+#define ARPHRD_IEEE802 6 /* token-ring hardware format */
+#define ARPHRD_FRELAY 15 /* frame relay hardware format */
+ u_short ar_pro; /* format of protocol address */
+ u_char ar_hln; /* length of hardware address */
+ u_char ar_pln; /* length of protocol address */
+ u_short ar_op; /* one of: */
+#define ARPOP_REQUEST 1 /* request to resolve address */
+#define ARPOP_REPLY 2 /* response to previous request */
+#define ARPOP_REVREQUEST 3 /* request protocol address given hardware */
+#define ARPOP_REVREPLY 4 /* response giving protocol address */
+#define ARPOP_INVREQUEST 8 /* request to identify peer */
+#define ARPOP_INVREPLY 9 /* response identifying peer */
+/*
+ * The remaining fields are variable in size,
+ * according to the sizes above.
+ */
+#ifdef COMMENT_ONLY
+ u_char ar_sha[]; /* sender hardware address */
+ u_char ar_spa[]; /* sender protocol address */
+ u_char ar_tha[]; /* target hardware address */
+ u_char ar_tpa[]; /* target protocol address */
+#endif
+};
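+
+/*
+ * Example (illustrative): the total length of an ARP packet and the
+ * position of the variable-sized fields follow from the fixed-length
+ * header above; "ah" is assumed to point at a contiguous packet.
+ *
+ *	struct arphdr *ah;
+ *	size_t len = sizeof(*ah) + 2 * ah->ar_hln + 2 * ah->ar_pln;
+ *	u_char *sha = (u_char *)(ah + 1);	/* ar_sha */
+ *	u_char *spa = sha + ah->ar_hln;		/* ar_spa */
+ */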
+
+/*
+ * ARP ioctl request
+ */
+struct arpreq {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+};
+/* arp_flags and at_flags field values */
+#define ATF_INUSE 0x01 /* entry in use */
+#define ATF_COM 0x02 /* completed entry (enaddr valid) */
+#define ATF_PERM 0x04 /* permanent entry */
+#define ATF_PUBL 0x08 /* publish entry (respond for other host) */
+#define ATF_USETRAILERS 0x10 /* has requested trailers */
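+
+/*
+ * Example (illustrative sketch): looking up an ARP entry from userland,
+ * assuming a socket "s" and SIOCGARP from <sys/sockio.h>.
+ *
+ *	struct arpreq ar;
+ *	struct sockaddr_in *sin = (struct sockaddr_in *)&ar.arp_pa;
+ *
+ *	memset(&ar, 0, sizeof(ar));
+ *	sin->sin_family = AF_INET;
+ *	sin->sin_len = sizeof(*sin);
+ *	sin->sin_addr.s_addr = inet_addr("10.0.0.1");
+ *	if (ioctl(s, SIOCGARP, &ar) == 0 && (ar.arp_flags & ATF_COM))
+ *		/* ar.arp_ha.sa_data holds the hardware address */;
+ */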
+
+#ifdef KERNEL
+/*
+ * Structure shared between the ethernet driver modules and
+ * the address resolution code. For example, each ec_softc or il_softc
+ * begins with this structure.
+ */
+struct arpcom {
+ /*
+ * The ifnet struct _must_ be at the head of this structure.
+ */
+ struct ifnet ac_if; /* network-visible interface */
+ u_char ac_enaddr[6]; /* ethernet hardware address */
+ int ac_multicnt; /* length of ac_multiaddrs list */
+/* #ifdef NETGRAPH */
+ void *ac_ng; /* hook to hang netgraph stuff off */
+/* #endif */
+};
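+
+/*
+ * Example (illustrative): as described above, a hypothetical driver
+ * softc embeds the arpcom at its head so generic code can treat it
+ * as both an ifnet and an arpcom.
+ *
+ *	struct foo_softc {
+ *		struct arpcom foo_ac;	/* must come first */
+ *		int foo_txbusy;		/* driver-private state */
+ *	};
+ */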
+
+extern u_char etherbroadcastaddr[6];
+#endif
+
+#endif /* !_NET_IF_ARP_H_ */
diff --git a/sys/net/if_atm.h b/sys/net/if_atm.h
new file mode 100644
index 0000000..16b32a6
--- /dev/null
+++ b/sys/net/if_atm.h
@@ -0,0 +1,112 @@
+/* $NetBSD: if_atm.h,v 1.7 1996/11/09 23:02:27 chuck Exp $ */
+
+/*
+ *
+ * Copyright (c) 1996 Charles D. Cranor and Washington University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * net/if_atm.h
+ */
+
+#if (defined(__FreeBSD__) || defined(__bsdi__)) && defined(KERNEL)
+#ifndef _KERNEL
+#define _KERNEL
+#endif
+#endif /* freebsd doesn't define _KERNEL */
+
+#if defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__)
+#define RTALLOC1(A,B) rtalloc1((A),(B))
+#elif defined(__FreeBSD__)
+#define RTALLOC1(A,B) rtalloc1((A),(B),0UL)
+#endif
+
+/*
+ * pseudo header for packet transmission
+ */
+struct atm_pseudohdr {
+ u_int8_t atm_ph[4]; /* flags+VPI+VCI1(msb)+VCI2(lsb) */
+};
+
+#define ATM_PH_FLAGS(X) ((X)->atm_ph[0])
+#define ATM_PH_VPI(X) ((X)->atm_ph[1])
+#define ATM_PH_VCI(X) ((((X)->atm_ph[2]) << 8) | ((X)->atm_ph[3]))
+#define ATM_PH_SETVCI(X,V) { \
+ (X)->atm_ph[2] = ((V) >> 8) & 0xff; \
+ (X)->atm_ph[3] = ((V) & 0xff); \
+}
+
+#define ATM_PH_AAL5 0x01 /* use AAL5? (0 == aal0) */
+#define ATM_PH_LLCSNAP 0x02 /* use the LLC SNAP encoding (iff aal5) */
+
+#define ATM_PH_DRIVER7 0x40 /* reserve for driver's use */
+#define ATM_PH_DRIVER8 0x80 /* reserve for driver's use */
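+
+/*
+ * Example (illustrative): filling in a pseudo header for an AAL5,
+ * LLC/SNAP-encapsulated VC on VPI 0, VCI 32 with the macros above.
+ *
+ *	struct atm_pseudohdr aph;
+ *
+ *	ATM_PH_FLAGS(&aph) = ATM_PH_AAL5 | ATM_PH_LLCSNAP;
+ *	ATM_PH_VPI(&aph) = 0;
+ *	ATM_PH_SETVCI(&aph, 32);
+ */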
+
+#define ATMMTU 9180 /* ATM MTU size for IP */
+ /* XXX: could be 9188 with LLC/SNAP according
+ to comer */
+
+/* user's ioctl hook for raw atm mode */
+#define SIOCRAWATM _IOWR('a', 122, int) /* set driver's raw mode */
+
+/* atm_pseudoioctl: turns on and off RX VCIs [for internal use only!] */
+struct atm_pseudoioctl {
+ struct atm_pseudohdr aph;
+ void *rxhand;
+};
+#define SIOCATMENA _IOWR('a', 123, struct atm_pseudoioctl) /* enable */
+#define SIOCATMDIS _IOWR('a', 124, struct atm_pseudoioctl) /* disable */
+
+
+/*
+ * XXX forget all the garbage in if_llc.h and do it the easy way
+ */
+
+#define ATMLLC_HDR "\252\252\3\0\0\0"
+struct atmllc {
+ u_int8_t llchdr[6]; /* aa.aa.03.00.00.00 */
+ u_int8_t type[2]; /* "ethernet" type */
+};
+
+/* ATM_LLC macros: note type code in host byte order */
+#define ATM_LLC_TYPE(X) (((X)->type[0] << 8) | ((X)->type[1]))
+#define ATM_LLC_SETTYPE(X,V) { \
+ (X)->type[1] = ((V) >> 8) & 0xff; \
+ (X)->type[0] = ((V) & 0xff); \
+}
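+
+/*
+ * Example (illustrative): reading the type code from a received
+ * LLC/SNAP header; ETHERTYPE_IP is assumed from <netinet/if_ether.h>
+ * and "m" is an mbuf whose data starts at the LLC/SNAP header.
+ *
+ *	struct atmllc *llc = mtod(m, struct atmllc *);
+ *
+ *	if (ATM_LLC_TYPE(llc) == ETHERTYPE_IP)
+ *		;	/* hand the payload to IP */
+ */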
+
+#ifdef _KERNEL
+void atm_ifattach __P((struct ifnet *));
+void atm_input __P((struct ifnet *, struct atm_pseudohdr *,
+ struct mbuf *, void *));
+int atm_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct rtentry *));
+#endif
+
diff --git a/sys/net/if_atmsubr.c b/sys/net/if_atmsubr.c
new file mode 100644
index 0000000..0a357a6
--- /dev/null
+++ b/sys/net/if_atmsubr.c
@@ -0,0 +1,350 @@
+/* $NetBSD: if_atmsubr.c,v 1.10 1997/03/11 23:19:51 chuck Exp $ */
+
+/*
+ *
+ * Copyright (c) 1996 Charles D. Cranor and Washington University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * if_atmsubr.c
+ */
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_natm.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/malloc.h>
+#include <sys/errno.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_atm.h>
+
+#include <netinet/in.h>
+#include <netinet/if_atm.h>
+#include <netinet/if_ether.h> /* XXX: for ETHERTYPE_* */
+#if defined(INET) || defined(INET6)
+#include <netinet/in_var.h>
+#endif
+#ifdef NATM
+#include <netnatm/natm.h>
+#endif
+
+#ifndef ETHERTYPE_IPV6
+#define ETHERTYPE_IPV6 0x86dd
+#endif
+
+#define senderr(e) { error = (e); goto bad;}
+
+/*
+ * atm_output: ATM output routine
+ * inputs:
+ * "ifp" = ATM interface to output to
+ * "m0" = the packet to output
+ * "dst" = the sockaddr to send to (either IP addr, or raw VPI/VCI)
+ * "rt0" = the route to use
+ * returns: error code [0 == ok]
+ *
+ * note: special semantic: if (dst == NULL) then we assume "m" already
+ * has an atm_pseudohdr on it and just send it directly.
+ * [for native mode ATM output] if dst is null, then
+ * rt0 must also be NULL.
+ */
+
+int
+atm_output(ifp, m0, dst, rt0)
+ register struct ifnet *ifp;
+ struct mbuf *m0;
+ struct sockaddr *dst;
+ struct rtentry *rt0;
+{
+ u_int16_t etype = 0; /* if using LLC/SNAP */
+ int s, error = 0, sz;
+ struct atm_pseudohdr atmdst, *ad;
+ register struct mbuf *m = m0;
+ register struct rtentry *rt;
+ struct atmllc *atmllc;
+ struct atmllc *llc_hdr = NULL;
+ u_int32_t atm_flags;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+ senderr(ENETDOWN);
+
+ /*
+ * check route
+ */
+ if ((rt = rt0) != NULL) {
+
+ if ((rt->rt_flags & RTF_UP) == 0) { /* route went down! */
+ if ((rt0 = rt = RTALLOC1(dst, 0)) != NULL)
+ rt->rt_refcnt--;
+ else
+ senderr(EHOSTUNREACH);
+ }
+
+ if (rt->rt_flags & RTF_GATEWAY) {
+ if (rt->rt_gwroute == 0)
+ goto lookup;
+ if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) {
+ rtfree(rt); rt = rt0;
+ lookup: rt->rt_gwroute = RTALLOC1(rt->rt_gateway, 0);
+ if ((rt = rt->rt_gwroute) == 0)
+ senderr(EHOSTUNREACH);
+ }
+ }
+
+ /* XXX: put RTF_REJECT code here if doing ATMARP */
+
+ }
+
+ /*
+ * check for non-native ATM traffic (dst != NULL)
+ */
+ if (dst) {
+ switch (dst->sa_family) {
+#if defined(INET) || defined(INET6)
+ case AF_INET:
+ case AF_INET6:
+ if (dst->sa_family == AF_INET6)
+ etype = htons(ETHERTYPE_IPV6);
+ else
+ etype = htons(ETHERTYPE_IP);
+ if (!atmresolve(rt, m, dst, &atmdst)) {
+ m = NULL;
+ /* XXX: atmresolve already free'd it */
+ senderr(EHOSTUNREACH);
+ /* XXX: put ATMARP stuff here */
+ /* XXX: watch who frees m on failure */
+ }
+ break;
+#endif /* INET || INET6 */
+
+ case AF_UNSPEC:
+ /*
+ * XXX: bpfwrite. assuming dst contains 12 bytes
+ * (atm pseudo header (4) + LLC/SNAP (8))
+ */
+ bcopy(dst->sa_data, &atmdst, sizeof(atmdst));
+ llc_hdr = (struct atmllc *)(dst->sa_data + sizeof(atmdst));
+ break;
+
+ default:
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+ printf("%s: can't handle af%d\n", ifp->if_xname,
+ dst->sa_family);
+#elif defined(__FreeBSD__) || defined(__bsdi__)
+ printf("%s%d: can't handle af%d\n", ifp->if_name,
+ ifp->if_unit, dst->sa_family);
+#endif
+ senderr(EAFNOSUPPORT);
+ }
+
+ /*
+ * must add atm_pseudohdr to data
+ */
+ sz = sizeof(atmdst);
+ atm_flags = ATM_PH_FLAGS(&atmdst);
+ if (atm_flags & ATM_PH_LLCSNAP) sz += 8; /* sizeof snap == 8 */
+ M_PREPEND(m, sz, M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ ad = mtod(m, struct atm_pseudohdr *);
+ *ad = atmdst;
+ if (atm_flags & ATM_PH_LLCSNAP) {
+ atmllc = (struct atmllc *)(ad + 1);
+ if (llc_hdr == NULL) {
+ bcopy(ATMLLC_HDR, atmllc->llchdr,
+ sizeof(atmllc->llchdr));
+ ATM_LLC_SETTYPE(atmllc, etype);
+ /* note: already in network order */
+ }
+ else
+ bcopy(llc_hdr, atmllc, sizeof(struct atmllc));
+ }
+ }
+
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ s = splimp();
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(s);
+ senderr(ENOBUFS);
+ }
+ ifp->if_obytes += m->m_pkthdr.len;
+ IF_ENQUEUE(&ifp->if_snd, m);
+ if ((ifp->if_flags & IFF_OACTIVE) == 0)
+ (*ifp->if_start)(ifp);
+ splx(s);
+ return (error);
+
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Process a received ATM packet;
+ * the packet is in the mbuf chain m.
+ */
+void
+atm_input(ifp, ah, m, rxhand)
+ struct ifnet *ifp;
+ register struct atm_pseudohdr *ah;
+ struct mbuf *m;
+ void *rxhand;
+{
+ register struct ifqueue *inq;
+ u_int16_t etype = ETHERTYPE_IP; /* default */
+ int s;
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+ ifp->if_ibytes += m->m_pkthdr.len;
+
+ if (rxhand) {
+#ifdef NATM
+ struct natmpcb *npcb = rxhand;
+ s = splimp(); /* in case 2 atm cards @ diff lvls */
+ npcb->npcb_inq++; /* count # in queue */
+ splx(s);
+ schednetisr(NETISR_NATM);
+ inq = &natmintrq;
+ m->m_pkthdr.rcvif = rxhand; /* XXX: overload */
+#else
+ printf("atm_input: NATM detected but not configured in kernel\n");
+ m_freem(m);
+ return;
+#endif
+ } else {
+ /*
+ * handle LLC/SNAP header, if present
+ */
+ if (ATM_PH_FLAGS(ah) & ATM_PH_LLCSNAP) {
+ struct atmllc *alc;
+ if (m->m_len < sizeof(*alc) &&
+ (m = m_pullup(m, sizeof(*alc))) == 0)
+ return; /* failed */
+ alc = mtod(m, struct atmllc *);
+ if (bcmp(alc, ATMLLC_HDR, 6)) {
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+ printf("%s: recv'd invalid LLC/SNAP frame [vp=%d,vc=%d]\n",
+ ifp->if_xname, ATM_PH_VPI(ah), ATM_PH_VCI(ah));
+#elif defined(__FreeBSD__) || defined(__bsdi__)
+ printf("%s%d: recv'd invalid LLC/SNAP frame [vp=%d,vc=%d]\n",
+ ifp->if_name, ifp->if_unit, ATM_PH_VPI(ah), ATM_PH_VCI(ah));
+#endif
+ m_freem(m);
+ return;
+ }
+ etype = ATM_LLC_TYPE(alc);
+ m_adj(m, sizeof(*alc));
+ }
+
+ switch (etype) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ schednetisr(NETISR_IPV6);
+ inq = &ip6intrq;
+ break;
+#endif
+ default:
+ m_freem(m);
+ return;
+ }
+ }
+
+ s = splimp();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ m_freem(m);
+ } else
+ IF_ENQUEUE(inq, m);
+ splx(s);
+}
+
+/*
+ * Perform common duties while attaching to interface list
+ */
+void
+atm_ifattach(ifp)
+ register struct ifnet *ifp;
+{
+ register struct ifaddr *ifa;
+ register struct sockaddr_dl *sdl;
+
+ ifp->if_type = IFT_ATM;
+ ifp->if_addrlen = 0;
+ ifp->if_hdrlen = 0;
+ ifp->if_mtu = ATMMTU;
+ ifp->if_output = atm_output;
+ ifp->if_snd.ifq_maxlen = 50; /* dummy */
+
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+ for (ifa = ifp->if_addrlist.tqh_first; ifa != 0;
+ ifa = ifa->ifa_list.tqe_next)
+#elif defined(__FreeBSD__) && (__FreeBSD__ > 2)
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next)
+#elif defined(__FreeBSD__) || defined(__bsdi__)
+ for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next)
+#endif
+ if ((sdl = (struct sockaddr_dl *)ifa->ifa_addr) &&
+ sdl->sdl_family == AF_LINK) {
+ sdl->sdl_type = IFT_ATM;
+ sdl->sdl_alen = ifp->if_addrlen;
+#ifdef notyet /* if using ATMARP, store hardware address using the next line */
+ bcopy(ifp->hw_addr, LLADDR(sdl), ifp->if_addrlen);
+#endif
+ break;
+ }
+
+}
diff --git a/sys/net/if_disc.c b/sys/net/if_disc.c
new file mode 100644
index 0000000..66bd5f8
--- /dev/null
+++ b/sys/net/if_disc.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)if_loop.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+/*
+ * Discard interface driver for protocol testing and timing.
+ * (Based on the loopback.)
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <net/bpf.h>
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#ifdef TINY_DSMTU
+#define DSMTU (1024+512)
+#else
+#define DSMTU 65532
+#endif
+
+static void discattach __P((void *dummy));
+PSEUDO_SET(discattach, if_disc);
+
+static struct ifnet discif;
+static int discoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct rtentry *);
+static void discrtrequest(int cmd, struct rtentry *rt, struct sockaddr *sa);
+static int discioctl(struct ifnet *, u_long, caddr_t);
+
+/* ARGSUSED */
+static void
+discattach(dummy)
+ void *dummy;
+{
+ register struct ifnet *ifp = &discif;
+
+ ifp->if_name = "ds";
+ ifp->if_mtu = DSMTU;
+ ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
+ ifp->if_ioctl = discioctl;
+ ifp->if_output = discoutput;
+ ifp->if_type = IFT_LOOP;
+ ifp->if_hdrlen = 0;
+ ifp->if_addrlen = 0;
+ ifp->if_snd.ifq_maxlen = 20;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int));
+}
+
+static int
+discoutput(ifp, m, dst, rt)
+ struct ifnet *ifp;
+ register struct mbuf *m;
+ struct sockaddr *dst;
+ register struct rtentry *rt;
+{
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("discoutput no HDR");
+ /* BPF write needs to be handled specially */
+ if (dst->sa_family == AF_UNSPEC) {
+ dst->sa_family = *(mtod(m, int *));
+ m->m_len -= sizeof(int);
+ m->m_pkthdr.len -= sizeof(int);
+ m->m_data += sizeof(int);
+ }
+
+ if (discif.if_bpf) {
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+ * try to free it or keep a pointer to it).
+ */
+ struct mbuf m0;
+ u_int af = dst->sa_family;
+
+ m0.m_next = m;
+ m0.m_len = 4;
+ m0.m_data = (char *)&af;
+
+ bpf_mtap(&discif, &m0);
+ }
+ m->m_pkthdr.rcvif = ifp;
+
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+
+ m_freem(m);
+ return 0;
+}
+
+/* ARGSUSED */
+static void
+discrtrequest(cmd, rt, sa)
+ int cmd;
+ struct rtentry *rt;
+ struct sockaddr *sa;
+{
+ if (rt)
+ rt->rt_rmx.rmx_mtu = DSMTU;
+}
+
+/*
+ * Process an ioctl request.
+ */
+/* ARGSUSED */
+static int
+discioctl(ifp, cmd, data)
+ register struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ register struct ifaddr *ifa;
+ register struct ifreq *ifr = (struct ifreq *)data;
+ register int error = 0;
+
+ switch (cmd) {
+
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ ifa = (struct ifaddr *)data;
+ if (ifa != 0)
+ ifa->ifa_rtrequest = discrtrequest;
+ /*
+ * Everything else is done at a higher level.
+ */
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifr == 0) {
+ error = EAFNOSUPPORT; /* XXX */
+ break;
+ }
+ switch (ifr->ifr_addr.sa_family) {
+
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
diff --git a/sys/net/if_dl.h b/sys/net/if_dl.h
new file mode 100644
index 0000000..aac7c14
--- /dev/null
+++ b/sys/net/if_dl.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_dl.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_DL_H_
+#define _NET_IF_DL_H_
+
+/*
+ * A Link-Level Sockaddr may specify the interface in one of two
+ * ways: either by means of a system-provided index number (computed
+ * anew and possibly differently on every reboot), or by a human-readable
+ * string such as "il0" (for managerial convenience).
+ *
+ * Census-taking actions, such as something akin to SIOCGCONF, would return
+ * both the index and the human-readable name.
+ *
+ * High-volume transactions (such as giving a link-level ``from'' address
+ * in a recvfrom or recvmsg call) are likely to provide only the indexed
+ * form (which requires fewer copy operations and less space).
+ *
+ * The form and interpretation of the link-level address is purely a matter
+ * of convention between the device driver and its consumers; however, it is
+ * expected that all drivers for an interface of a given if_type will agree.
+ */
+
+/*
+ * Structure of a Link-Level sockaddr:
+ */
+struct sockaddr_dl {
+ u_char sdl_len; /* Total length of sockaddr */
+ u_char sdl_family; /* AF_DLI */
+ u_short sdl_index; /* if != 0, system given index for interface */
+ u_char sdl_type; /* interface type */
+ u_char sdl_nlen; /* interface name length, no trailing 0 reqd. */
+ u_char sdl_alen; /* link level address length */
+ u_char sdl_slen; /* link layer selector length */
+ char sdl_data[12]; /* minimum work area, can be larger;
+ contains both if name and ll address */
+ u_short sdl_rcf; /* source routing control */
+ u_short sdl_route[16]; /* source routing information */
+};
+
+#define LLADDR(s) ((caddr_t)((s)->sdl_data + (s)->sdl_nlen))
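+
+/*
+ * Example (illustrative): extracting the interface name and link-level
+ * address from a sockaddr_dl, e.g. one found in a routing message.
+ * IFNAMSIZ (from <net/if.h>) is assumed to bound sdl_nlen.
+ *
+ *	struct sockaddr_dl *sdl;	/* assumed to be filled in */
+ *	char name[IFNAMSIZ];
+ *	u_char *lladdr;
+ *
+ *	bcopy(sdl->sdl_data, name, sdl->sdl_nlen);
+ *	name[sdl->sdl_nlen] = '\0';	/* sdl_data carries no NUL */
+ *	lladdr = (u_char *)LLADDR(sdl);	/* sdl_alen bytes long */
+ */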
+
+#ifndef KERNEL
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+void link_addr __P((const char *, struct sockaddr_dl *));
+char *link_ntoa __P((const struct sockaddr_dl *));
+__END_DECLS
+
+#endif /* !KERNEL */
+
+#endif
diff --git a/sys/net/if_ef.c b/sys/net/if_ef.c
new file mode 100644
index 0000000..d726d18
--- /dev/null
+++ b/sys/net/if_ef.c
@@ -0,0 +1,595 @@
+/*-
+ * Copyright (c) 1999, Boris Popov
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_inet.h"
+#include "opt_ipx.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/syslog.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <net/ethernet.h>
+#include <net/if_llc.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/bpf.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/if_ether.h>
+#endif
+
+#ifdef IPX
+#include <netipx/ipx.h>
+#include <netipx/ipx_if.h>
+#endif
+
+/* internal frame types */
+#define ETHER_FT_EII 0 /* Ethernet_II - default */
+#define ETHER_FT_8023 1 /* 802.3 (Novell) */
+#define ETHER_FT_8022 2 /* 802.2 */
+#define ETHER_FT_SNAP 3 /* SNAP */
+#define EF_NFT 4 /* total number of frame types */
+
+#ifdef EF_DEBUG
+#define EFDEBUG(format, args...) printf("%s: "format, __FUNCTION__ ,## args)
+#else
+#define EFDEBUG(format, args...)
+#endif
+
+#define EFERROR(format, args...) printf("%s: "format, __FUNCTION__ ,## args)
+
+struct efnet {
+ struct arpcom ef_ac;
+ struct ifnet * ef_ifp;
+};
+
+struct ef_link {
+ SLIST_ENTRY(ef_link) el_next;
+ struct ifnet *el_ifp; /* raw device for this clones */
+ struct efnet *el_units[EF_NFT]; /* our clones */
+};
+
+static SLIST_HEAD(ef_link_head, ef_link) efdev = {NULL};
+static int efcount;
+
+extern int (*ef_inputp)(struct ifnet*, struct ether_header *eh, struct mbuf *m);
+extern int (*ef_outputp)(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, short *tp);
+
+/*
+static void ef_reset (struct ifnet *);
+*/
+static int ef_attach(struct efnet *sc);
+static int ef_detach(struct efnet *sc);
+static void ef_init(void *);
+static int ef_ioctl(struct ifnet *, u_long, caddr_t);
+static void ef_start(struct ifnet *);
+static int ef_input(struct ifnet*, struct ether_header *, struct mbuf *);
+static int ef_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, short *tp);
+
+static int ef_load(void);
+static int ef_unload(void);
+
+/*
+ * Install the interface; most of the structure initialization is done in ef_clone()
+ */
+static int
+ef_attach(struct efnet *sc)
+{
+ struct ifnet *ifp = (struct ifnet*)&sc->ef_ac.ac_if;
+ struct ifaddr *ifa1, *ifa2;
+ struct sockaddr_dl *sdl1, *sdl2;
+
+ ifp->if_output = ether_output;
+ ifp->if_start = ef_start;
+ ifp->if_watchdog = NULL;
+ ifp->if_init = ef_init;
+ ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
+ ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+ /*
+ * Attach the interface
+ */
+ if_attach(ifp);
+ ether_ifattach(ifp);
+ bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
+
+ ifp->if_resolvemulti = 0;
+ ifp->if_type = IFT_XETHER;
+ ifp->if_flags |= IFF_RUNNING;
+
+ ifa1 = ifnet_addrs[ifp->if_index - 1];
+ ifa2 = ifnet_addrs[sc->ef_ifp->if_index - 1];
+ sdl1 = (struct sockaddr_dl *)ifa1->ifa_addr;
+ sdl2 = (struct sockaddr_dl *)ifa2->ifa_addr;
+ sdl1->sdl_type = IFT_ETHER;
+ sdl1->sdl_alen = ETHER_ADDR_LEN;
+ bcopy(LLADDR(sdl2), LLADDR(sdl1), ETHER_ADDR_LEN);
+ bcopy(LLADDR(sdl2), sc->ef_ac.ac_enaddr, ETHER_ADDR_LEN);
+
+ EFDEBUG("%s%d: attached\n", ifp->if_name, ifp->if_unit);
+ return 1;
+}
+
+/*
+ * This is for _testing_only_; it just removes the interface from the interface list
+ */
+static int
+ef_detach(struct efnet *sc)
+{
+ struct ifnet *ifp = (struct ifnet*)&sc->ef_ac.ac_if;
+ int s;
+
+ s = splimp();
+
+ if (ifp->if_flags & IFF_UP) {
+ if_down(ifp);
+ if (ifp->if_flags & IFF_RUNNING) {
+ /* find internet addresses and delete routes */
+ register struct ifaddr *ifa;
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next) {
+ rtinit(ifa, (int)RTM_DELETE, 0);
+ }
+ }
+ }
+
+ TAILQ_REMOVE(&ifnet, ifp, if_link);
+ splx(s);
+ return 0;
+}
+
+static void
+ef_init(void *foo) {
+ return;
+}
+
+static int
+ef_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+/* struct ef_link *sc = (struct ef_link*)ifp->if_softc;*/
+ struct ifaddr *ifa = (struct ifaddr*)data;
+ int s, error;
+
+ EFDEBUG("IOCTL %ld for %s%d\n", cmd, ifp->if_name, ifp->if_unit);
+ error = 0;
+ s = splimp();
+ switch (cmd) {
+ case SIOCSIFADDR:
+ if (ifp->if_unit == ETHER_FT_8023 &&
+ ifa->ifa_addr->sa_family != AF_IPX) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ ifp->if_flags |= IFF_UP;
+ /* FALL THROUGH */
+ case SIOCGIFADDR:
+ case SIOCSIFMTU:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ case SIOCSIFFLAGS:
+ error = 0;
+ break;
+ default:
+ error = EINVAL;
+ }
+ splx(s);
+ return error;
+}
+
+/*
+ * Currently the packet is prepared in ether_output(), but this could be a
+ * better place to do it.
+ */
+static void
+ef_start(struct ifnet *ifp)
+{
+ struct efnet *sc = (struct efnet*)ifp->if_softc;
+ struct ifnet *p;
+ struct mbuf *m;
+
+ ifp->if_flags |= IFF_OACTIVE;
+ p = sc->ef_ifp;
+
+ EFDEBUG("\n");
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == 0)
+ break;
+ if (ifp->if_bpf)
+ bpf_mtap(ifp, m);
+ if (IF_QFULL(&p->if_snd)) {
+ IF_DROP(&p->if_snd);
+ /* XXX stats */
+ }
+ IF_ENQUEUE(&p->if_snd, m);
+ if ((p->if_flags & IFF_OACTIVE) == 0)
+ p->if_start(p);
+ }
+ ifp->if_flags &= ~IFF_OACTIVE;
+ return;
+}
+
+/*
+ * Inline functions add no procedure-call or parameter-passing overhead
+ * but simplify the code.
+ */
+static int __inline
+ef_inputEII(struct mbuf *m, struct ether_header *eh, struct llc* l,
+ u_short ether_type, struct ifqueue **inq)
+{
+ switch(ether_type) {
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ schednetisr(NETISR_IPX);
+ *inq = &ipxintrq;
+ break;
+#endif
+#ifdef INET
+ case ETHERTYPE_IP:
+ if (ipflow_fastforward(m))
+ return 1;
+ schednetisr(NETISR_IP);
+ *inq = &ipintrq;
+ break;
+
+ case ETHERTYPE_ARP:
+ schednetisr(NETISR_ARP);
+ *inq = &arpintrq;
+ break;
+#endif
+ }
+ return 0;
+}
+
+static int __inline
+ef_inputSNAP(struct mbuf *m, struct ether_header *eh, struct llc* l,
+ u_short ether_type, struct ifqueue **inq)
+{
+ switch(ether_type) {
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ m_adj(m, 8);
+ schednetisr(NETISR_IPX);
+ *inq = &ipxintrq;
+ break;
+#endif
+ }
+ return 0;
+}
+
+static int __inline
+ef_input8022(struct mbuf *m, struct ether_header *eh, struct llc* l,
+ u_short ether_type, struct ifqueue **inq)
+{
+ switch(ether_type) {
+#ifdef IPX
+ case 0xe0:
+ m_adj(m, 3);
+ schednetisr(NETISR_IPX);
+ *inq = &ipxintrq;
+ break;
+#endif
+ }
+ return 0;
+}
+/*
+ * Called from ether_input()
+ */
+static int
+ef_input(struct ifnet *ifp, struct ether_header *eh, struct mbuf *m)
+{
+ u_short ether_type;
+ int s, ft = -1;
+ struct ifqueue *inq;
+ struct efnet *efp;
+ struct ifnet *eifp;
+ struct llc *l;
+ struct ef_link *efl;
+
+ ether_type = ntohs(eh->ether_type);
+ if (ether_type < ETHERMTU) {
+ l = mtod(m, struct llc*);
+ if (l->llc_dsap == 0xff && l->llc_ssap == 0xff) {
+ /*
+ * Novell's "802.3" frame
+ */
+ ft = ETHER_FT_8023;
+ } else if (l->llc_dsap == 0xaa && l->llc_ssap == 0xaa) {
+ /*
+ * 802.2/SNAP
+ */
+ ft = ETHER_FT_SNAP;
+ ether_type = ntohs(l->llc_un.type_snap.ether_type);
+ } else if (l->llc_dsap == l->llc_ssap) {
+ /*
+ * 802.3/802.2
+ */
+ ft = ETHER_FT_8022;
+ ether_type = l->llc_ssap;
+ }
+ } else
+ ft = ETHER_FT_EII;
+
+ if (ft == -1) {
+ EFDEBUG("Unrecognised ether_type %x\n", ether_type);
+ return -1;
+ }
+
+ /*
+ * Check if interface configured for the given frame
+ */
+ efp = NULL;
+ SLIST_FOREACH(efl, &efdev, el_next) {
+ if (efl->el_ifp == ifp) {
+ efp = efl->el_units[ft];
+ break;
+ }
+ }
+ if (efp == NULL) {
+ EFDEBUG("Can't find if for %d\n", ft);
+ return -1;
+ }
+ eifp = &efp->ef_ac.ac_if;
+ if ((eifp->if_flags & IFF_UP) == 0)
+ return -1;
+ eifp->if_ibytes += m->m_pkthdr.len + sizeof (*eh);
+ m->m_pkthdr.rcvif = eifp;
+
+ if (eifp->if_bpf) {
+ struct mbuf m0;
+ m0.m_next = m;
+ m0.m_len = sizeof(struct ether_header);
+ m0.m_data = (char *)eh;
+ bpf_mtap(eifp, &m0);
+ }
+ /*
+ * Now we are ready to adjust the mbufs and pass them to the protocol intr's
+ */
+ inq = NULL;
+ switch(ft) {
+ case ETHER_FT_EII:
+ if (ef_inputEII(m, eh, l, ether_type, &inq) == 1)
+ return 0;
+ break;
+#ifdef IPX
+ case ETHER_FT_8023: /* only IPX can be here */
+ schednetisr(NETISR_IPX);
+ inq = &ipxintrq;
+ break;
+#endif
+ case ETHER_FT_SNAP:
+ if (ef_inputSNAP(m, eh, l, ether_type, &inq) == 1)
+ return 0;
+ break;
+ case ETHER_FT_8022:
+ if (ef_input8022(m, eh, l, ether_type, &inq) == 1)
+ return 0;
+ break;
+ }
+
+ if (inq == NULL) {
+ EFDEBUG("No support for frame %d and proto %04x\n",
+ ft, ether_type);
+ return -1;
+ }
+ s = splimp();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ m_freem(m);
+ } else
+ IF_ENQUEUE(inq, m);
+ splx(s);
+ return 0;
+}
+
+static int
+ef_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, short *tp)
+{
+ u_char *cp;
+ short type;
+
+ if (ifp->if_type != IFT_XETHER)
+ return 1;
+ switch (ifp->if_unit) {
+ case ETHER_FT_EII:
+#ifdef IPX
+ type = htons(ETHERTYPE_IPX);
+#else
+ return 1;
+#endif
+ break;
+ case ETHER_FT_8023:
+ type = htons(m->m_pkthdr.len);
+ break;
+ case ETHER_FT_8022:
+ M_PREPEND(m, 3, M_WAIT);
+ type = htons(m->m_pkthdr.len);
+ cp = mtod(m, u_char *);
+ *cp++ = 0xE0;
+ *cp++ = 0xE0;
+ *cp++ = 0x03;
+ break;
+ case ETHER_FT_SNAP:
+ M_PREPEND(m, 8, M_WAIT);
+ type = htons(m->m_pkthdr.len);
+ cp = mtod(m, u_char *);
+ bcopy("\xAA\xAA\x03\x00\x00\x00\x81\x37", cp, 8);
+ break;
+ default:
+ return -1;
+ }
+ *tp = type;
+ return 0;
+}
+
+/*
+ * Create clone from the given interface
+ */
+static int
+ef_clone(struct ef_link *efl, int ft)
+{
+ struct efnet *efp;
+ struct ifnet *eifp;
+ struct ifnet *ifp = efl->el_ifp;
+ char cbuf[IFNAMSIZ], *ifname;
+ int ifnlen;
+
+ efp = (struct efnet*)malloc(sizeof(struct efnet), M_IFADDR, M_WAITOK);
+ if (efp == NULL)
+ return ENOMEM;
+ bzero(efp, sizeof(*efp));
+ efp->ef_ifp = ifp;
+ eifp = &efp->ef_ac.ac_if;
+ ifnlen = 1 + snprintf(cbuf, sizeof(cbuf), "%s%df", ifp->if_name,
+ ifp->if_unit);
+ ifname = (char*)malloc(ifnlen, M_IFADDR, M_WAITOK);
+ eifp->if_name = strcpy(ifname, cbuf);
+ eifp->if_unit = ft;
+ eifp->if_softc = efp;
+ if (ifp->if_ioctl)
+ eifp->if_ioctl = ef_ioctl;
+ efl->el_units[ft] = efp;
+ return 0;
+}
+
+static int
+ef_load(void)
+{
+ struct ifnet *ifp;
+ struct efnet *efp;
+ struct ef_link *efl = NULL;
+ int error = 0, d;
+
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) {
+ if (ifp->if_type != IFT_ETHER) continue;
+ EFDEBUG("Found interface %s%d\n", ifp->if_name, ifp->if_unit);
+ efl = (struct ef_link*)malloc(sizeof(struct ef_link),
+ M_IFADDR, M_WAITOK);
+ if (efl == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bzero(efl, sizeof(*efl));
+
+ efl->el_ifp = ifp;
+#ifdef ETHER_II
+ error = ef_clone(efl, ETHER_FT_EII);
+ if (error) break;
+#endif
+#ifdef ETHER_8023
+ error = ef_clone(efl, ETHER_FT_8023);
+ if (error) break;
+#endif
+#ifdef ETHER_8022
+ error = ef_clone(efl, ETHER_FT_8022);
+ if (error) break;
+#endif
+#ifdef ETHER_SNAP
+ error = ef_clone(efl, ETHER_FT_SNAP);
+ if (error) break;
+#endif
+ efcount++;
+ SLIST_INSERT_HEAD(&efdev, efl, el_next);
+ }
+ if (error) {
+ if (efl)
+ SLIST_INSERT_HEAD(&efdev, efl, el_next);
+ SLIST_FOREACH(efl, &efdev, el_next) {
+ for (d = 0; d < EF_NFT; d++)
+ if (efl->el_units[d])
+ free(efl->el_units[d], M_IFADDR);
+ free(efl, M_IFADDR);
+ }
+ return error;
+ }
+ SLIST_FOREACH(efl, &efdev, el_next) {
+ for (d = 0; d < EF_NFT; d++) {
+ efp = efl->el_units[d];
+ if (efp)
+ ef_attach(efp);
+ }
+ }
+ ef_inputp = ef_input;
+ ef_outputp = ef_output;
+ EFDEBUG("Loaded\n");
+ return 0;
+}
+
+static int
+ef_unload(void)
+{
+ struct efnet *efp;
+ struct ef_link *efl;
+ int d;
+
+ ef_inputp = NULL;
+ ef_outputp = NULL;
+ SLIST_FOREACH(efl, &efdev, el_next) {
+ for (d = 0; d < EF_NFT; d++) {
+ efp = efl->el_units[d];
+ if (efp) {
+ ef_detach(efp);
+ }
+ }
+ }
+ EFDEBUG("Unloaded\n");
+ return 0;
+}
+
+static int
+if_ef_modevent(module_t mod, int type, void *data)
+{
+ switch ((modeventtype_t)type) {
+ case MOD_LOAD:
+ return ef_load();
+ case MOD_UNLOAD:
+ return ef_unload();
+ default:
+ break;
+ }
+ return 0;
+}
+
+static moduledata_t if_ef_mod = {
+ "if_ef", if_ef_modevent, NULL
+};
+
+DECLARE_MODULE(if_ef, if_ef_mod, SI_SUB_PSEUDO, SI_ORDER_MIDDLE);
diff --git a/sys/net/if_ethersubr.c b/sys/net/if_ethersubr.c
new file mode 100644
index 0000000..37c3d11
--- /dev/null
+++ b/sys/net/if_ethersubr.c
@@ -0,0 +1,1308 @@
+/*
+ * Copyright (c) 1982, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_ethersubr.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#include "opt_atalk.h"
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_ipx.h"
+#include "opt_bdg.h"
+#include "opt_netgraph.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/if_llc.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+
+#if defined(INET) || defined(INET6)
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/if_ether.h>
+#endif
+#ifdef INET6
+#include <netinet6/nd6.h>
+#include <netinet6/in6_ifattach.h>
+#endif
+
+#ifdef IPX
+#include <netipx/ipx.h>
+#include <netipx/ipx_if.h>
+int (*ef_inputp)(struct ifnet*, struct ether_header *eh, struct mbuf *m);
+int (*ef_outputp)(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, short *tp);
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+ushort ns_nettype;
+int ether_outputdebug = 0;
+int ether_inputdebug = 0;
+#endif
+
+#ifdef ISO
+#include <netiso/argo_debug.h>
+#include <netiso/iso.h>
+#include <netiso/iso_var.h>
+#include <netiso/iso_snpac.h>
+#endif
+
+/*#ifdef LLC
+#include <netccitt/dll.h>
+#include <netccitt/llc_var.h>
+#endif*/
+
+#if defined(LLC) && defined(CCITT)
+extern struct ifqueue pkintrq;
+#endif
+
+#ifdef NETATALK
+#include <netatalk/at.h>
+#include <netatalk/at_var.h>
+#include <netatalk/at_extern.h>
+
+#define llc_snap_org_code llc_un.type_snap.org_code
+#define llc_snap_ether_type llc_un.type_snap.ether_type
+
+extern u_char at_org_code[3];
+extern u_char aarp_org_code[3];
+#endif /* NETATALK */
+
+#ifdef BRIDGE
+#include <net/bridge.h>
+#endif
+
+#include "vlan.h"
+#if NVLAN > 0
+#include <net/if_vlan_var.h>
+#endif /* NVLAN > 0 */
+
+static int ether_resolvemulti __P((struct ifnet *, struct sockaddr **,
+ struct sockaddr *));
+u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+#define senderr(e) do { error = (e); goto bad;} while (0)
+#define IFP2AC(IFP) ((struct arpcom *)IFP)
+
+#ifdef NETGRAPH
+#include <netgraph/ng_ether.h>
+#include <netgraph/ng_message.h>
+#include <netgraph/netgraph.h>
+
+static void ngether_init(void* ignored);
+static void ngether_send(struct arpcom *ac,
+ struct ether_header *eh, struct mbuf *m);
+static ng_constructor_t ngether_constructor;
+static ng_rcvmsg_t ngether_rcvmsg;
+static ng_shutdown_t ngether_rmnode;
+static ng_newhook_t ngether_newhook;
+static ng_connect_t ngether_connect;
+static ng_rcvdata_t ngether_rcvdata;
+static ng_disconnect_t ngether_disconnect;
+
+static struct ng_type typestruct = {
+ NG_VERSION,
+ NG_ETHER_NODE_TYPE,
+ NULL,
+ ngether_constructor,
+ ngether_rcvmsg,
+ ngether_rmnode,
+ ngether_newhook,
+ NULL,
+ ngether_connect,
+ ngether_rcvdata,
+ ngether_rcvdata,
+ ngether_disconnect,
+ NULL
+};
+
+#define AC2NG(AC) ((node_p)((AC)->ac_ng))
+#define NGEF_DIVERT NGF_TYPE1 /* all packets sent to netgraph */
+#endif /* NETGRAPH */
+
+/*
+ * Ethernet output routine.
+ * Encapsulate a packet of type family for the local net.
+ * Use trailer local net encapsulation if enough data in first
+ * packet leaves a multiple of 512 bytes of data in remainder.
+ * Assumes that ifp is actually pointer to arpcom structure.
+ */
+int
+ether_output(ifp, m, dst, rt0)
+ register struct ifnet *ifp;
+ struct mbuf *m;
+ struct sockaddr *dst;
+ struct rtentry *rt0;
+{
+ short type;
+ int s, error = 0, hdrcmplt = 0;
+ u_char esrc[6], edst[6];
+ register struct rtentry *rt;
+ register struct ether_header *eh;
+ int off, len = m->m_pkthdr.len, loop_copy = 0;
+ int hlen; /* link layer header length */
+ struct arpcom *ac = IFP2AC(ifp);
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+ senderr(ENETDOWN);
+ rt = rt0;
+ if (rt) {
+ if ((rt->rt_flags & RTF_UP) == 0) {
+ rt0 = rt = rtalloc1(dst, 1, 0UL);
+ if (rt0)
+ rt->rt_refcnt--;
+ else
+ senderr(EHOSTUNREACH);
+ }
+ if (rt->rt_flags & RTF_GATEWAY) {
+ if (rt->rt_gwroute == 0)
+ goto lookup;
+ if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) {
+ rtfree(rt); rt = rt0;
+ lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1,
+ 0UL);
+ if ((rt = rt->rt_gwroute) == 0)
+ senderr(EHOSTUNREACH);
+ }
+ }
+ if (rt->rt_flags & RTF_REJECT)
+ if (rt->rt_rmx.rmx_expire == 0 ||
+ time_second < rt->rt_rmx.rmx_expire)
+ senderr(rt == rt0 ? EHOSTDOWN : EHOSTUNREACH);
+ }
+ hlen = ETHER_HDR_LEN;
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (!arpresolve(ac, rt, m, dst, edst, rt0))
+ return (0); /* if not yet resolved */
+ off = m->m_pkthdr.len - m->m_len;
+ type = htons(ETHERTYPE_IP);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (!nd6_storelladdr(&ac->ac_if, rt, m, dst, (u_char *)edst)) {
+ /* this must be impossible, so we bark */
+ printf("nd6_storelladdr failed\n");
+ return(0);
+ }
+ off = m->m_pkthdr.len - m->m_len;
+ type = htons(ETHERTYPE_IPV6);
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ if (ef_outputp) {
+ error = ef_outputp(ifp, m, dst, &type);
+ if (error < 0)
+ senderr(EPFNOSUPPORT);
+ if (error > 0)
+ type = htons(ETHERTYPE_IPX);
+ } else
+ type = htons(ETHERTYPE_IPX);
+ bcopy((caddr_t)&(((struct sockaddr_ipx *)dst)->sipx_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK:
+ {
+ struct at_ifaddr *aa;
+
+ if ((aa = at_ifawithnet((struct sockaddr_at *)dst)) == NULL) {
+ goto bad;
+ }
+ if (!aarpresolve(ac, m, (struct sockaddr_at *)dst, edst))
+ return (0);
+ /*
+ * In the phase 2 case, need to prepend an mbuf for the llc header.
+ * Since we must preserve the value of m, which is passed to us by
+ * value, we m_copy() the first mbuf, and use it for our llc header.
+ */
+ if ( aa->aa_flags & AFA_PHASE2 ) {
+ struct llc llc;
+
+ M_PREPEND(m, sizeof(struct llc), M_WAIT);
+ len += sizeof(struct llc);
+ llc.llc_dsap = llc.llc_ssap = LLC_SNAP_LSAP;
+ llc.llc_control = LLC_UI;
+ bcopy(at_org_code, llc.llc_snap_org_code, sizeof(at_org_code));
+ llc.llc_snap_ether_type = htons( ETHERTYPE_AT );
+ bcopy(&llc, mtod(m, caddr_t), sizeof(struct llc));
+ type = htons(m->m_pkthdr.len);
+ hlen = sizeof(struct llc) + ETHER_HDR_LEN;
+ } else {
+ type = htons(ETHERTYPE_AT);
+ }
+ break;
+ }
+#endif /* NETATALK */
+#ifdef NS
+ case AF_NS:
+ switch(ns_nettype){
+ default:
+ case 0x8137: /* Novell Ethernet_II Ethernet TYPE II */
+ type = 0x8137;
+ break;
+ case 0x0: /* Novell 802.3 */
+ type = htons( m->m_pkthdr.len);
+ break;
+ case 0xe0e0: /* Novell 802.2 and Token-Ring */
+ M_PREPEND(m, 3, M_WAIT);
+ type = htons( m->m_pkthdr.len);
+ cp = mtod(m, u_char *);
+ *cp++ = 0xE0;
+ *cp++ = 0xE0;
+ *cp++ = 0x03;
+ break;
+ }
+ bcopy((caddr_t)&(((struct sockaddr_ns *)dst)->sns_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ /*
+ * XXX if ns_thishost is the same as the node's ethernet
+ * address then just the default code will catch this anyhow.
+ * So I'm not sure if this next clause should be here at all?
+ * [JRE]
+ */
+ if (!bcmp((caddr_t)edst, (caddr_t)&ns_thishost, sizeof(edst))){
+ m->m_pkthdr.rcvif = ifp;
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ s = splimp();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ m_freem(m);
+ } else
+ IF_ENQUEUE(inq, m);
+ splx(s);
+ return (error);
+ }
+ if (!bcmp((caddr_t)edst, (caddr_t)&ns_broadhost, sizeof(edst))){
+ m->m_flags |= M_BCAST;
+ }
+ break;
+#endif /* NS */
+#ifdef ISO
+ case AF_ISO: {
+ int snpalen;
+ struct llc *l;
+ register struct sockaddr_dl *sdl;
+
+ if (rt && (sdl = (struct sockaddr_dl *)rt->rt_gateway) &&
+ sdl->sdl_family == AF_LINK && sdl->sdl_alen > 0) {
+ bcopy(LLADDR(sdl), (caddr_t)edst, sizeof(edst));
+ } else if (error =
+ iso_snparesolve(ifp, (struct sockaddr_iso *)dst,
+ (char *)edst, &snpalen))
+ goto bad; /* Not Resolved */
+ /* If broadcasting on a simplex interface, loopback a copy */
+ if (*edst & 1)
+ m->m_flags |= (M_BCAST|M_MCAST);
+ M_PREPEND(m, 3, M_DONTWAIT);
+ if (m == NULL)
+ return (0);
+ type = htons(m->m_pkthdr.len);
+ l = mtod(m, struct llc *);
+ l->llc_dsap = l->llc_ssap = LLC_ISO_LSAP;
+ l->llc_control = LLC_UI;
+ len += 3;
+ IFDEBUG(D_ETHER)
+ int i;
+ printf("unoutput: sending pkt to: ");
+ for (i=0; i<6; i++)
+ printf("%x ", edst[i] & 0xff);
+ printf("\n");
+ ENDDEBUG
+ } break;
+#endif /* ISO */
+#ifdef LLC
+/* case AF_NSAP: */
+ case AF_CCITT: {
+ register struct sockaddr_dl *sdl =
+ (struct sockaddr_dl *) rt -> rt_gateway;
+
+ if (sdl && sdl->sdl_family == AF_LINK
+ && sdl->sdl_alen > 0) {
+ bcopy(LLADDR(sdl), (char *)edst, sizeof(edst));
+ } else goto bad; /* Not a link interface ? Funny ... */
+ if (*edst & 1)
+ loop_copy = 1;
+ type = htons(m->m_pkthdr.len);
+#ifdef LLC_DEBUG
+ {
+ int i;
+ register struct llc *l = mtod(m, struct llc *);
+
+ printf("ether_output: sending LLC2 pkt to: ");
+ for (i=0; i<6; i++)
+ printf("%x ", edst[i] & 0xff);
+ printf(" len 0x%x dsap 0x%x ssap 0x%x control 0x%x\n",
+ type & 0xff, l->llc_dsap & 0xff, l->llc_ssap &0xff,
+ l->llc_control & 0xff);
+
+ }
+#endif /* LLC_DEBUG */
+ } break;
+#endif /* LLC */
+
+ case pseudo_AF_HDRCMPLT:
+ hdrcmplt = 1;
+ eh = (struct ether_header *)dst->sa_data;
+ (void)memcpy(esrc, eh->ether_shost, sizeof (esrc));
+ /* FALLTHROUGH */
+
+ case AF_UNSPEC:
+ loop_copy = -1; /* if this is for us, don't do it */
+ eh = (struct ether_header *)dst->sa_data;
+ (void)memcpy(edst, eh->ether_dhost, sizeof (edst));
+ type = eh->ether_type;
+ break;
+
+ default:
+ printf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit,
+ dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ }
+
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ M_PREPEND(m, sizeof (struct ether_header), M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ eh = mtod(m, struct ether_header *);
+ (void)memcpy(&eh->ether_type, &type,
+ sizeof(eh->ether_type));
+ (void)memcpy(eh->ether_dhost, edst, sizeof (edst));
+ if (hdrcmplt)
+ (void)memcpy(eh->ether_shost, esrc,
+ sizeof(eh->ether_shost));
+ else
+ (void)memcpy(eh->ether_shost, ac->ac_enaddr,
+ sizeof(eh->ether_shost));
+
+ /*
+ * If a simplex interface, and the packet is being sent to our
+ * Ethernet address or a broadcast address, loopback a copy.
+ * XXX To make a simplex device behave exactly like a duplex
+ * device, we should copy in the case of sending to our own
+ * ethernet address (thus letting the original actually appear
+ * on the wire). However, we don't do that here for security
+ * reasons and compatibility with the original behavior.
+ */
+ if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) {
+ if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
+ struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
+
+ (void) if_simloop(ifp, n, dst, hlen);
+ } else if (bcmp(eh->ether_dhost,
+ eh->ether_shost, ETHER_ADDR_LEN) == 0) {
+ (void) if_simloop(ifp, m, dst, hlen);
+ return (0); /* XXX */
+ }
+ }
+#ifdef BRIDGE
+ if (do_bridge) {
+ struct mbuf *m0 = m ;
+
+ if (m->m_pkthdr.rcvif)
+ m->m_pkthdr.rcvif = NULL ;
+ ifp = bridge_dst_lookup(m);
+ bdg_forward(&m0, ifp);
+ if (m0)
+ m_freem(m0);
+ return (0);
+ }
+#endif
+ s = splimp();
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(s);
+ senderr(ENOBUFS);
+ }
+ IF_ENQUEUE(&ifp->if_snd, m);
+ if ((ifp->if_flags & IFF_OACTIVE) == 0)
+ (*ifp->if_start)(ifp);
+ splx(s);
+ ifp->if_obytes += len + sizeof (struct ether_header);
+ if (m->m_flags & M_MCAST)
+ ifp->if_omcasts++;
+ return (error);
+
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Process a received Ethernet packet;
+ * the packet is in the mbuf chain m without
+ * the ether header, which is provided separately.
+ */
+void
+ether_input(ifp, eh, m)
+ struct ifnet *ifp;
+ register struct ether_header *eh;
+ struct mbuf *m;
+{
+ register struct ifqueue *inq;
+ u_short ether_type;
+ int s;
+#if defined (ISO) || defined (LLC) || defined(NETATALK)
+ register struct llc *l;
+#endif
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+ ifp->if_ibytes += m->m_pkthdr.len + sizeof (*eh);
+ if (eh->ether_dhost[0] & 1) {
+ if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost,
+ sizeof(etherbroadcastaddr)) == 0)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ }
+ if (m->m_flags & (M_BCAST|M_MCAST))
+ ifp->if_imcasts++;
+
+ ether_type = ntohs(eh->ether_type);
+
+#ifdef NETGRAPH
+ {
+ struct arpcom *ac = IFP2AC(ifp);
+ if (AC2NG(ac) && (AC2NG(ac)->flags & NGEF_DIVERT)) {
+ ngether_send(ac, eh, m);
+ return;
+ }
+ }
+#endif /* NETGRAPH */
+
+#if NVLAN > 0
+ if (ether_type == vlan_proto) {
+ if (vlan_input(eh, m) < 0)
+ ifp->if_data.ifi_noproto++;
+ return;
+ }
+#endif /* NVLAN > 0 */
+
+ switch (ether_type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if (ipflow_fastforward(m))
+ return;
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+
+ case ETHERTYPE_ARP:
+ schednetisr(NETISR_ARP);
+ inq = &arpintrq;
+ break;
+#endif
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ if (ef_inputp && ef_inputp(ifp, eh, m) == 0)
+ return;
+ schednetisr(NETISR_IPX);
+ inq = &ipxintrq;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ schednetisr(NETISR_IPV6);
+ inq = &ip6intrq;
+ break;
+#endif
+#ifdef NS
+ case 0x8137: /* Novell Ethernet_II Ethernet TYPE II */
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ break;
+
+#endif /* NS */
+#ifdef NETATALK
+ case ETHERTYPE_AT:
+ schednetisr(NETISR_ATALK);
+ inq = &atintrq1;
+ break;
+ case ETHERTYPE_AARP:
+ /* probably this should be done with a NETISR as well */
+ aarpinput(IFP2AC(ifp), m); /* XXX */
+ return;
+#endif /* NETATALK */
+ default:
+#ifdef IPX
+ if (ef_inputp && ef_inputp(ifp, eh, m) == 0)
+ return;
+#endif /* IPX */
+#ifdef NS
+ checksum = mtod(m, ushort *);
+ /* Novell 802.3 */
+ if ((ether_type <= ETHERMTU) &&
+ ((*checksum == 0xffff) || (*checksum == 0xE0E0))){
+ if(*checksum == 0xE0E0) {
+ m->m_pkthdr.len -= 3;
+ m->m_len -= 3;
+ m->m_data += 3;
+ }
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ break;
+ }
+#endif /* NS */
+#if defined (ISO) || defined (LLC) || defined(NETATALK)
+ if (ether_type > ETHERMTU)
+ goto dropanyway;
+ l = mtod(m, struct llc *);
+ switch (l->llc_dsap) {
+#ifdef NETATALK
+ case LLC_SNAP_LSAP:
+ switch (l->llc_control) {
+ case LLC_UI:
+ if (l->llc_ssap != LLC_SNAP_LSAP)
+ goto dropanyway;
+
+ if (Bcmp(&(l->llc_snap_org_code)[0], at_org_code,
+ sizeof(at_org_code)) == 0 &&
+ ntohs(l->llc_snap_ether_type) == ETHERTYPE_AT) {
+ inq = &atintrq2;
+ m_adj( m, sizeof( struct llc ));
+ schednetisr(NETISR_ATALK);
+ break;
+ }
+
+ if (Bcmp(&(l->llc_snap_org_code)[0], aarp_org_code,
+ sizeof(aarp_org_code)) == 0 &&
+ ntohs(l->llc_snap_ether_type) == ETHERTYPE_AARP) {
+ m_adj( m, sizeof( struct llc ));
+ aarpinput(IFP2AC(ifp), m); /* XXX */
+ return;
+ }
+
+ default:
+ goto dropanyway;
+ }
+ break;
+#endif /* NETATALK */
+#ifdef ISO
+ case LLC_ISO_LSAP:
+ switch (l->llc_control) {
+ case LLC_UI:
+ /* LLC_UI_P forbidden in class 1 service */
+ if ((l->llc_dsap == LLC_ISO_LSAP) &&
+ (l->llc_ssap == LLC_ISO_LSAP)) {
+ /* LSAP for ISO */
+ if (m->m_pkthdr.len > ether_type)
+ m_adj(m, ether_type - m->m_pkthdr.len);
+ m->m_data += 3; /* XXX */
+ m->m_len -= 3; /* XXX */
+ m->m_pkthdr.len -= 3; /* XXX */
+ M_PREPEND(m, sizeof *eh, M_DONTWAIT);
+ if (m == 0)
+ return;
+ *mtod(m, struct ether_header *) = *eh;
+ IFDEBUG(D_ETHER)
+ printf("clnp packet");
+ ENDDEBUG
+ schednetisr(NETISR_ISO);
+ inq = &clnlintrq;
+ break;
+ }
+ goto dropanyway;
+
+ case LLC_XID:
+ case LLC_XID_P:
+ if(m->m_len < 6)
+ goto dropanyway;
+ l->llc_window = 0;
+ l->llc_fid = 9;
+ l->llc_class = 1;
+ l->llc_dsap = l->llc_ssap = 0;
+ /* Fall through to */
+ case LLC_TEST:
+ case LLC_TEST_P:
+ {
+ struct sockaddr sa;
+ register struct ether_header *eh2;
+ int i;
+ u_char c = l->llc_dsap;
+
+ l->llc_dsap = l->llc_ssap;
+ l->llc_ssap = c;
+ if (m->m_flags & (M_BCAST | M_MCAST))
+ bcopy((caddr_t)ac->ac_enaddr,
+ (caddr_t)eh->ether_dhost, 6);
+ sa.sa_family = AF_UNSPEC;
+ sa.sa_len = sizeof(sa);
+ eh2 = (struct ether_header *)sa.sa_data;
+ for (i = 0; i < 6; i++) {
+ eh2->ether_shost[i] = c = eh->ether_dhost[i];
+ eh2->ether_dhost[i] =
+ eh->ether_dhost[i] = eh->ether_shost[i];
+ eh->ether_shost[i] = c;
+ }
+ ifp->if_output(ifp, m, &sa, NULL);
+ return;
+ }
+ default:
+ m_freem(m);
+ return;
+ }
+ break;
+#endif /* ISO */
+#ifdef LLC
+ case LLC_X25_LSAP:
+ {
+ if (m->m_pkthdr.len > ether_type)
+ m_adj(m, ether_type - m->m_pkthdr.len);
+ M_PREPEND(m, sizeof(struct sdl_hdr) , M_DONTWAIT);
+ if (m == 0)
+ return;
+ if ( !sdl_sethdrif(ifp, eh->ether_shost, LLC_X25_LSAP,
+ eh->ether_dhost, LLC_X25_LSAP, 6,
+ mtod(m, struct sdl_hdr *)))
+ panic("ETHER cons addr failure");
+ mtod(m, struct sdl_hdr *)->sdlhdr_len = ether_type;
+#ifdef LLC_DEBUG
+ printf("llc packet\n");
+#endif /* LLC_DEBUG */
+ schednetisr(NETISR_CCITT);
+ inq = &llcintrq;
+ break;
+ }
+#endif /* LLC */
+ dropanyway:
+ default:
+#ifdef NETGRAPH
+ ngether_send(IFP2AC(ifp), eh, m);
+#else /* NETGRAPH */
+ m_freem(m);
+#endif /* NETGRAPH */
+ return;
+ }
+#else /* ISO || LLC || NETATALK */
+#ifdef NETGRAPH
+ ngether_send(IFP2AC(ifp), eh, m);
+#else /* NETGRAPH */
+ m_freem(m);
+#endif /* NETGRAPH */
+ return;
+#endif /* ISO || LLC || NETATALK */
+ }
+
+ s = splimp();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ m_freem(m);
+ } else
+ IF_ENQUEUE(inq, m);
+ splx(s);
+}
+
+/*
+ * Perform common duties while attaching to interface list
+ */
+void
+ether_ifattach(ifp)
+ register struct ifnet *ifp;
+{
+ register struct ifaddr *ifa;
+ register struct sockaddr_dl *sdl;
+
+ ifp->if_type = IFT_ETHER;
+ ifp->if_addrlen = 6;
+ ifp->if_hdrlen = 14;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_resolvemulti = ether_resolvemulti;
+ if (ifp->if_baudrate == 0)
+ ifp->if_baudrate = 10000000;
+ ifa = ifnet_addrs[ifp->if_index - 1];
+ if (ifa == 0) {
+ printf("ether_ifattach: no lladdr!\n");
+ return;
+ }
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ETHER;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy((IFP2AC(ifp))->ac_enaddr, LLADDR(sdl), ifp->if_addrlen);
+#ifdef NETGRAPH
+ ngether_init(ifp);
+#endif /* NETGRAPH */
+#ifdef INET6
+ in6_ifattach_getifid(ifp);
+#endif
+}
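+
+/*
+ * A rough sketch of how an Ethernet driver is expected to use the helper
+ * above at attach time; the "xx" driver names and softc layout are purely
+ * illustrative and details vary from driver to driver:
+ *
+ *	struct ifnet *ifp = &sc->arpcom.ac_if;
+ *
+ *	ifp->if_softc = sc;
+ *	ifp->if_name = "xx";
+ *	ifp->if_unit = unit;
+ *	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ *	ifp->if_init = xx_init;
+ *	ifp->if_start = xx_start;
+ *	ifp->if_ioctl = xx_ioctl;
+ *	ifp->if_output = ether_output;
+ *	if_attach(ifp);
+ *	ether_ifattach(ifp);
+ *	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
+ *
+ * if_attach() must run first so that ifnet_addrs[] has the link-level
+ * address slot this routine fills in.
+ */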
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW, 0, "Ethernet");
+
+int
+ether_ioctl(ifp, command, data)
+ struct ifnet *ifp;
+ int command;
+ caddr_t data;
+{
+ struct ifaddr *ifa = (struct ifaddr *) data;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error = 0;
+
+ switch (command) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifp->if_init(ifp->if_softc); /* before arpwhohas */
+ arp_ifinit(IFP2AC(ifp), ifa);
+ break;
+#endif
+#ifdef IPX
+ /*
+ * XXX - This code is probably wrong
+ */
+ case AF_IPX:
+ {
+ register struct ipx_addr *ina = &(IA_SIPX(ifa)->sipx_addr);
+ struct arpcom *ac = IFP2AC(ifp);
+
+ if (ipx_nullhost(*ina))
+ ina->x_host =
+ *(union ipx_host *)
+ ac->ac_enaddr;
+ else {
+ bcopy((caddr_t) ina->x_host.c_host,
+ (caddr_t) ac->ac_enaddr,
+ sizeof(ac->ac_enaddr));
+ }
+
+ /*
+ * Set new address
+ */
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+#endif
+#ifdef NS
+ /*
+ * XXX - This code is probably wrong
+ */
+ case AF_NS:
+ {
+ register struct ns_addr *ina = &(IA_SNS(ifa)->sns_addr);
+ struct arpcom *ac = IFP2AC(ifp);
+
+ if (ns_nullhost(*ina))
+ ina->x_host =
+ *(union ns_host *) (ac->ac_enaddr);
+ else {
+ bcopy((caddr_t) ina->x_host.c_host,
+ (caddr_t) ac->ac_enaddr,
+ sizeof(ac->ac_enaddr));
+ }
+
+ /*
+ * Set new address
+ */
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+#endif
+ default:
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+ break;
+
+ case SIOCGIFADDR:
+ {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) & ifr->ifr_data;
+ bcopy(IFP2AC(ifp)->ac_enaddr,
+ (caddr_t) sa->sa_data, ETHER_ADDR_LEN);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ if (ifr->ifr_mtu > ETHERMTU) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+ }
+ return (error);
+}
+
+int
+ether_resolvemulti(ifp, llsa, sa)
+ struct ifnet *ifp;
+ struct sockaddr **llsa;
+ struct sockaddr *sa;
+{
+ struct sockaddr_dl *sdl;
+ struct sockaddr_in *sin;
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+ u_char *e_addr;
+
+ switch(sa->sa_family) {
+ case AF_LINK:
+ /*
+ * No mapping needed. Just check that it's a valid MC address.
+ */
+ sdl = (struct sockaddr_dl *)sa;
+ e_addr = LLADDR(sdl);
+ if ((e_addr[0] & 1) != 1)
+ return EADDRNOTAVAIL;
+ *llsa = 0;
+ return 0;
+
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)sa;
+ if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ return EADDRNOTAVAIL;
+ MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR,
+ M_WAITOK);
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ETHER;
+ sdl->sdl_nlen = 0;
+ sdl->sdl_alen = ETHER_ADDR_LEN;
+ sdl->sdl_slen = 0;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return 0;
+#endif
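+	/*
+	 * For reference, ETHER_MAP_IP_MULTICAST() above is the standard
+	 * RFC 1112 mapping: 01:00:5e followed by the low 23 bits of the
+	 * group address, e.g. 224.0.0.251 maps to 01:00:5e:00:00:fb.
+	 */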
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
+ return EADDRNOTAVAIL;
+ MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR,
+ M_WAITOK);
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ETHER;
+ sdl->sdl_nlen = 0;
+ sdl->sdl_alen = ETHER_ADDR_LEN;
+ sdl->sdl_slen = 0;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return 0;
+#endif
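+	/*
+	 * Likewise, ETHER_MAP_IPV6_MULTICAST() follows RFC 2464: 33:33
+	 * followed by the low 32 bits of the group address, e.g. ff02::1
+	 * becomes 33:33:00:00:00:01.
+	 */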
+
+ default:
+		/*
+		 * No mapping exists for this address family, so the best
+		 * we can do is return EAFNOSUPPORT.
+		 */
+ return EAFNOSUPPORT;
+ }
+}
+
+#ifdef NETGRAPH
+
+/***********************************************************************
+ * This section contains the methods for the Netgraph interface
+ ***********************************************************************/
+/* It's Ascii-art time!
+ * The ifnet is the first part of the arpcom which must be
+ * the first part of the device's softc.. yuk.
+ *
+ * +--------------------------+-----+---------+
+ * | struct ifnet (*ifp) | | |
+ * | | | |
+ * +--------------------------+ | |
+ * +--|[ac_ng] struct arpcom (*ac) | |
+ * | +--------------------------------+ |
+ * | | struct softc (*ifp->if_softc) (device) |
+ * | +------------------------------------------+
+ * | ^
+ * AC2NG() |
+ * | v
+ * | +----------------------+
+ * | | [private] [flags] |
+ * +------>| struct ng_node |
+ * | [hooks] | ** we only allow one hook
+ * +----------------------+
+ * ^
+ * |
+ * v
+ * +-------------+
+ * | [node] |
+ * | hook |
+ * | [private]|-- *unused*
+ * +-------------+
+ */
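+
+/*
+ * The conversion macros that layout implies look roughly like the
+ * following; the real definitions live in the headers, so treat this
+ * as an illustrative sketch only:
+ *
+ *	#define IFP2AC(ifp)	((struct arpcom *)(ifp))
+ *	#define AC2NG(ac)	((ac)->ac_ng)
+ *
+ * Both depend on the ifnet being the first member of the arpcom, which
+ * in turn must be the first member of the device softc.
+ */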
+
+/*
+ * called during interface attaching
+ */
+static void
+ngether_init(void *ifpvoid)
+{
+ struct ifnet *ifp = ifpvoid;
+ struct arpcom *ac = IFP2AC(ifp);
+ static int ngether_done_init;
+ char namebuf[32];
+ node_p node;
+
+ /*
+	 * Make sure our node 'type' is registered before we create a node.
+ */
+ if (ngether_done_init == 0) {
+ if (ng_newtype(&typestruct)) {
+ printf("ngether install failed\n");
+ return;
+ }
+ ngether_done_init = 1;
+ }
+ if (ng_make_node_common(&typestruct, &node) != 0)
+ return;
+ ac->ac_ng = node;
+ node->private = ifp;
+ sprintf(namebuf, "%s%d", ifp->if_name, ifp->if_unit);
+ ng_name_node(AC2NG(ac), namebuf);
+}
+
+/*
+ * It is not possible or allowable to create a node of this type.
+ * If the hardware exists, it will already have created it.
+ */
+static int
+ngether_constructor(node_p *nodep)
+{
+ return (EINVAL);
+}
+
+/*
+ * Give our ok for a hook to be added...
+ *
+ * Allow one hook at a time (rawdata).
+ * It can either divert everything or only unclaimed packets.
+ */
+static int
+ngether_newhook(node_p node, hook_p hook, const char *name)
+{
+
+ /* check if there is already a hook */
+ if (LIST_FIRST(&(node->hooks)))
+ return(EISCONN);
+ /*
+ * Check for which mode hook we want.
+ */
+ if (strcmp(name, NG_ETHER_HOOK_ORPHAN) != 0) {
+ if (strcmp(name, NG_ETHER_HOOK_DIVERT) != 0) {
+ return (EINVAL);
+ }
+ node->flags |= NGEF_DIVERT;
+ } else {
+ node->flags &= ~NGEF_DIVERT;
+ }
+ return (0);
+}
+
+/*
+ * incoming messages.
+ * Just respond to the generic TEXT_STATUS message
+ */
+static int
+ngether_rcvmsg(node_p node,
+ struct ng_mesg *msg, const char *retaddr, struct ng_mesg **resp)
+{
+ struct ifnet *ifp;
+ int error = 0;
+
+ ifp = node->private;
+ switch (msg->header.typecookie) {
+ case NGM_ETHER_COOKIE:
+ error = EINVAL;
+ break;
+ case NGM_GENERIC_COOKIE:
+ switch(msg->header.cmd) {
+ case NGM_TEXT_STATUS: {
+ char *arg;
+ int pos = 0;
+ int resplen = sizeof(struct ng_mesg) + 512;
+ MALLOC(*resp, struct ng_mesg *, resplen,
+ M_NETGRAPH, M_NOWAIT);
+ if (*resp == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bzero(*resp, resplen);
+ arg = (*resp)->data;
+
+ /*
+ * Put in the throughput information.
+ */
+ pos = sprintf(arg, "%ld bytes in, %ld bytes out\n",
+ ifp->if_ibytes, ifp->if_obytes);
+ pos += sprintf(arg + pos,
+ "%ld output errors\n",
+ ifp->if_oerrors);
+ pos += sprintf(arg + pos,
+ "ierrors = %ld\n",
+ ifp->if_ierrors);
+
+ (*resp)->header.version = NG_VERSION;
+ (*resp)->header.arglen = strlen(arg) + 1;
+ (*resp)->header.token = msg->header.token;
+ (*resp)->header.typecookie = NGM_ETHER_COOKIE;
+ (*resp)->header.cmd = msg->header.cmd;
+ strncpy((*resp)->header.cmdstr, "status",
+ NG_CMDSTRLEN);
+ }
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ free(msg, M_NETGRAPH);
+ return (error);
+}
+
+/*
+ * Receive a completed ethernet packet.
+ * Queue it for output.
+ */
+static int
+ngether_rcvdata(hook_p hook, struct mbuf *m, meta_p meta)
+{
+ struct ifnet *ifp;
+ int error = 0;
+ int s;
+ struct ether_header *eh;
+
+ ifp = hook->node->private;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+ senderr(ENETDOWN);
+ /* drop in the MAC address */
+ eh = mtod(m, struct ether_header *);
+ bcopy(IFP2AC(ifp)->ac_enaddr, eh->ether_shost, 6);
+ /*
+ * If a simplex interface, and the packet is being sent to our
+ * Ethernet address or a broadcast address, loopback a copy.
+ * XXX To make a simplex device behave exactly like a duplex
+ * device, we should copy in the case of sending to our own
+ * ethernet address (thus letting the original actually appear
+ * on the wire). However, we don't do that here for security
+ * reasons and compatibility with the original behavior.
+ */
+ if (ifp->if_flags & IFF_SIMPLEX) {
+ if (m->m_flags & M_BCAST) {
+ struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
+
+ ng_queue_data(hook, n, meta);
+ } else if (bcmp(eh->ether_dhost,
+ eh->ether_shost, ETHER_ADDR_LEN) == 0) {
+ ng_queue_data(hook, m, meta);
+ return (0); /* XXX */
+ }
+ }
+ s = splimp();
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+	 * XXX if we looked at the priority in the meta data we could
+ * queue high priority items at the head.
+ */
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(s);
+ senderr(ENOBUFS);
+ }
+ IF_ENQUEUE(&ifp->if_snd, m);
+ if ((ifp->if_flags & IFF_OACTIVE) == 0)
+ (*ifp->if_start)(ifp);
+ splx(s);
+ ifp->if_obytes += m->m_pkthdr.len;
+ if (m->m_flags & M_MCAST)
+ ifp->if_omcasts++;
+ return (error);
+
+bad:
+ NG_FREE_DATA(m, meta);
+ return (error);
+}
+
+/*
+ * pass an mbuf out to the connected hook
+ * More complicated than just an m_prepend, as it tries to save later nodes
+ * from needing to do lots of m_pullups.
+ */
+static void
+ngether_send(struct arpcom *ac, struct ether_header *eh, struct mbuf *m)
+{
+ int room;
+ node_p node = AC2NG(ac);
+ struct ether_header *eh2;
+
+ if (node && LIST_FIRST(&(node->hooks))) {
+ /*
+ * Possibly the header is already on the front,
+ */
+ eh2 = mtod(m, struct ether_header *) - 1;
+ if ( eh == eh2) {
+ /*
+ * This is the case so just move the markers back to
+ * re-include it. We lucked out.
+ * This allows us to avoid a yucky m_pullup
+ * in later nodes if it works.
+ */
+ m->m_len += sizeof(*eh);
+ m->m_data -= sizeof(*eh);
+ m->m_pkthdr.len += sizeof(*eh);
+ } else {
+ /*
+ * Alternatively there may be room even though
+ * it is stored somewhere else. If so, copy it in.
+			 * This is only safe because we KNOW that this packet has
+ * just been generated by an ethernet card, so there
+ * are no aliases to the buffer. (unlike in outgoing
+ * packets).
+ * Nearly all ethernet cards will end up producing mbufs
+ * that fall into these cases. So we are not optimising
+ * contorted cases.
+ */
+
+ if (m->m_flags & M_EXT) {
+ room = (mtod(m, caddr_t) - m->m_ext.ext_buf);
+ if (room > m->m_ext.ext_size) /* garbage */
+					room = 0; /* fail immediately */
+ } else {
+ room = (mtod(m, caddr_t) - m->m_pktdat);
+ }
+ if (room > sizeof (*eh)) {
+ /* we have room, just copy it and adjust */
+ m->m_len += sizeof(*eh);
+ m->m_data -= sizeof(*eh);
+ m->m_pkthdr.len += sizeof(*eh);
+ } else {
+ /*
+ * Doing anything more is likely to get more
+ * expensive than it's worth..
+ * it's probable that everything else is in one
+ * big lump. The next node will do an m_pullup()
+ * for exactly the amount of data it needs and
+ * hopefully everything after that will not
+ * need one. So let's just use M_PREPEND.
+ */
+ M_PREPEND(m, sizeof (*eh), M_DONTWAIT);
+ if (m == NULL)
+ return;
+ }
+ bcopy ((caddr_t)eh, mtod(m, struct ether_header *),
+ sizeof(*eh));
+ }
+ ng_queue_data(LIST_FIRST(&(node->hooks)), m, NULL);
+ } else {
+ m_freem(m);
+ }
+}
+
+/*
+ * Do local shutdown processing.
+ * This node will refuse to go away unless the hardware says to;
+ * don't unref the node or remove our name, just clear our links up.
+ */
+static int
+ngether_rmnode(node_p node)
+{
+ ng_cutlinks(node);
+ node->flags &= ~NG_INVALID; /* bounce back to life */
+ return (0);
+}
+
+/* already linked */
+static int
+ngether_connect(hook_p hook)
+{
+ /* be really amiable and just say "YUP that's OK by me! " */
+ return (0);
+}
+
+/*
+ * notify on hook disconnection (destruction)
+ *
+ * For this type, removal of the last link has no effect; the interface can
+ * run independently.
+ * Since we have no per-hook information, this is rather simple.
+ */
+static int
+ngether_disconnect(hook_p hook)
+{
+ hook->node->flags &= ~NGEF_DIVERT;
+ return (0);
+}
+#endif /* NETGRAPH */
+
+/********************************** END *************************************/
diff --git a/sys/net/if_faith.c b/sys/net/if_faith.c
new file mode 100644
index 0000000..0e21af7
--- /dev/null
+++ b/sys/net/if_faith.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * derived from
+ * @(#)if_loop.c 8.1 (Berkeley) 6/10/93
+ * Id: if_loop.c,v 1.22 1996/06/19 16:24:10 wollman Exp
+ */
+
+/*
+ * "faith" pseudo-interface, derived from the loopback driver and used by
+ * the FAITH IPv6-to-IPv4 TCP relay.
+ */
+
+#include "faith.h"
+#if NFAITH > 0
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/bpf.h>
+
+extern int loioctl __P((struct ifnet *, u_long, caddr_t));
+extern int looutput __P((struct ifnet *ifp,
+ struct mbuf *m, struct sockaddr *dst, struct rtentry *rt));
+
+void faithattach __P((void *));
+PSEUDO_SET(faithattach, if_faith);
+static struct ifnet faithif[NFAITH];
+
+#define FAITHMTU 1500
+
+/* ARGSUSED */
+void
+faithattach(faith)
+ void *faith;
+{
+ register struct ifnet *ifp;
+ register int i;
+
+ for (i = 0; i < NFAITH; i++) {
+ ifp = &faithif[i];
+ bzero(ifp, sizeof(faithif[i]));
+ ifp->if_name = "faith";
+ ifp->if_unit = i;
+ ifp->if_mtu = FAITHMTU;
+		/* Change to BROADCAST experimentally to announce its prefix. */
+ ifp->if_flags = /* IFF_LOOPBACK */ IFF_BROADCAST | IFF_MULTICAST;
+ ifp->if_ioctl = loioctl;
+ ifp->if_output = looutput;
+ ifp->if_type = IFT_FAITH;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_hdrlen = 0;
+ ifp->if_addrlen = 0;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int));
+ }
+}
+#endif /* NFAITH > 0 */
diff --git a/sys/net/if_fddisubr.c b/sys/net/if_fddisubr.c
new file mode 100644
index 0000000..fc9a75c
--- /dev/null
+++ b/sys/net/if_fddisubr.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 1995, 1996
+ * Matt Thomas <matt@3am-software.com>. All rights reserved.
+ * Copyright (c) 1982, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: if_ethersubr.c,v 1.5 1994/12/13 22:31:45 wollman Exp
+ * $FreeBSD$
+ */
+
+#include "opt_atalk.h"
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_ipx.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/if_llc.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+
+#if defined(INET) || defined(INET6)
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/if_ether.h>
+#endif
+#ifdef INET6
+#include <netinet6/nd6.h>
+#endif
+#if defined(__FreeBSD__)
+#include <netinet/if_fddi.h>
+#else
+#include <net/if_fddi.h>
+#endif
+
+#ifdef IPX
+#include <netipx/ipx.h>
+#include <netipx/ipx_if.h>
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+#endif
+
+#ifdef DECNET
+#include <netdnet/dn.h>
+#endif
+
+#ifdef ISO
+#include <netiso/argo_debug.h>
+#include <netiso/iso.h>
+#include <netiso/iso_var.h>
+#include <netiso/iso_snpac.h>
+#endif
+
+#ifdef LLC
+#include <netccitt/dll.h>
+#include <netccitt/llc_var.h>
+#endif
+
+#ifdef NETATALK
+#include <netatalk/at.h>
+#include <netatalk/at_var.h>
+#include <netatalk/at_extern.h>
+
+#define llc_snap_org_code llc_un.type_snap.org_code
+#define llc_snap_ether_type llc_un.type_snap.ether_type
+
+extern u_char at_org_code[ 3 ];
+extern u_char aarp_org_code[ 3 ];
+#endif /* NETATALK */
+
+#if defined(LLC) && defined(CCITT)
+extern struct ifqueue pkintrq;
+#endif
+
+#define senderr(e) { error = (e); goto bad;}
+
+/*
+ * This really should be defined in if_llc.h but in case it isn't.
+ */
+#ifndef llc_snap
+#define llc_snap llc_un.type_snap
+#endif
+
+#if defined(__bsdi__) || defined(__NetBSD__)
+#define RTALLOC1(a, b) rtalloc1(a, b)
+#define ARPRESOLVE(a, b, c, d, e, f) arpresolve(a, b, c, d, e)
+#elif defined(__FreeBSD__)
+#define RTALLOC1(a, b) rtalloc1(a, b, 0UL)
+#define ARPRESOLVE(a, b, c, d, e, f) arpresolve(a, b, c, d, e, f)
+#endif
+/*
+ * FDDI output routine.
+ * Encapsulate a packet of type family for the local net.
+ * Use trailer local net encapsulation if enough data in first
+ * packet leaves a multiple of 512 bytes of data in remainder.
+ * Assumes that ifp is actually pointer to arpcom structure.
+ */
+int
+fddi_output(ifp, m, dst, rt0)
+ register struct ifnet *ifp;
+ struct mbuf *m;
+ struct sockaddr *dst;
+ struct rtentry *rt0;
+{
+ u_int16_t type;
+ int s, loop_copy = 0, error = 0, hdrcmplt = 0;
+ u_char esrc[6], edst[6];
+ register struct rtentry *rt;
+ register struct fddi_header *fh;
+ struct arpcom *ac = (struct arpcom *)ifp;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+ senderr(ENETDOWN);
+ getmicrotime(&ifp->if_lastchange);
+#if !defined(__bsdi__) || _BSDI_VERSION >= 199401
+ if ((rt = rt0) != NULL) {
+ if ((rt->rt_flags & RTF_UP) == 0) {
+ if ((rt0 = rt = RTALLOC1(dst, 1)) != NULL)
+ rt->rt_refcnt--;
+ else
+ senderr(EHOSTUNREACH);
+ }
+ if (rt->rt_flags & RTF_GATEWAY) {
+ if (rt->rt_gwroute == 0)
+ goto lookup;
+ if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) {
+ rtfree(rt); rt = rt0;
+ lookup: rt->rt_gwroute = RTALLOC1(rt->rt_gateway, 1);
+ if ((rt = rt->rt_gwroute) == 0)
+ senderr(EHOSTUNREACH);
+ }
+ }
+ if (rt->rt_flags & RTF_REJECT)
+ if (rt->rt_rmx.rmx_expire == 0 ||
+ time_second < rt->rt_rmx.rmx_expire)
+ senderr(rt == rt0 ? EHOSTDOWN : EHOSTUNREACH);
+ }
+#endif
+ switch (dst->sa_family) {
+
+#ifdef INET
+ case AF_INET: {
+#if !defined(__bsdi__) || _BSDI_VERSION >= 199401
+ if (!ARPRESOLVE(ac, rt, m, dst, edst, rt0))
+ return (0); /* if not yet resolved */
+#else
+ int usetrailers;
+ if (!arpresolve(ac, m, &((struct sockaddr_in *)dst)->sin_addr, edst, &usetrailers))
+ return (0); /* if not yet resolved */
+#endif
+ type = htons(ETHERTYPE_IP);
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (!nd6_storelladdr(&ac->ac_if, rt, m, dst, (u_char *)edst)) {
+ /* this must be impossible, so we bark */
+ printf("nd6_storelladdr failed\n");
+ return(0);
+ }
+ type = htons(ETHERTYPE_IPV6);
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ type = htons(ETHERTYPE_IPX);
+ bcopy((caddr_t)&(((struct sockaddr_ipx *)dst)->sipx_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK: {
+ struct at_ifaddr *aa;
+ if (!aarpresolve(ac, m, (struct sockaddr_at *)dst, edst))
+ return (0);
+ /*
+ * ifaddr is the first thing in at_ifaddr
+ */
+ if ((aa = at_ifawithnet( (struct sockaddr_at *)dst)) == 0)
+ goto bad;
+
+ /*
+	 * In the phase 2 case we need to prepend an LLC/SNAP header, so
+	 * make room in front of the packet with M_PREPEND() and fill the
+	 * header in below.
+ */
+ if (aa->aa_flags & AFA_PHASE2) {
+ struct llc llc;
+
+ M_PREPEND(m, sizeof(struct llc), M_WAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ llc.llc_dsap = llc.llc_ssap = LLC_SNAP_LSAP;
+ llc.llc_control = LLC_UI;
+ bcopy(at_org_code, llc.llc_snap_org_code, sizeof(at_org_code));
+ llc.llc_snap_ether_type = htons(ETHERTYPE_AT);
+ bcopy(&llc, mtod(m, caddr_t), sizeof(struct llc));
+ type = 0;
+ } else {
+ type = htons(ETHERTYPE_AT);
+ }
+ break;
+ }
+#endif /* NETATALK */
+#ifdef NS
+ case AF_NS:
+ type = htons(ETHERTYPE_NS);
+ bcopy((caddr_t)&(((struct sockaddr_ns *)dst)->sns_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ break;
+#endif
+#ifdef ISO
+ case AF_ISO: {
+ int snpalen;
+ struct llc *l;
+ register struct sockaddr_dl *sdl;
+
+ if (rt && (sdl = (struct sockaddr_dl *)rt->rt_gateway) &&
+ sdl->sdl_family == AF_LINK && sdl->sdl_alen > 0) {
+ bcopy(LLADDR(sdl), (caddr_t)edst, sizeof(edst));
+ } else if (error =
+ iso_snparesolve(ifp, (struct sockaddr_iso *)dst,
+ (char *)edst, &snpalen))
+ goto bad; /* Not Resolved */
+ /* If broadcasting on a simplex interface, loopback a copy */
+ if (*edst & 1)
+ m->m_flags |= (M_BCAST|M_MCAST);
+ M_PREPEND(m, 3, M_DONTWAIT);
+ if (m == NULL)
+ return (0);
+ type = 0;
+ l = mtod(m, struct llc *);
+ l->llc_dsap = l->llc_ssap = LLC_ISO_LSAP;
+ l->llc_control = LLC_UI;
+ IFDEBUG(D_ETHER)
+ int i;
+ printf("unoutput: sending pkt to: ");
+ for (i=0; i<6; i++)
+ printf("%x ", edst[i] & 0xff);
+ printf("\n");
+ ENDDEBUG
+ } break;
+#endif /* ISO */
+#ifdef LLC
+/* case AF_NSAP: */
+ case AF_CCITT: {
+ register struct sockaddr_dl *sdl =
+ (struct sockaddr_dl *) rt -> rt_gateway;
+
+ if (sdl && sdl->sdl_family != AF_LINK && sdl->sdl_alen <= 0)
+ goto bad; /* Not a link interface ? Funny ... */
+ bcopy(LLADDR(sdl), (char *)edst, sizeof(edst));
+ if (*edst & 1)
+ loop_copy = 1;
+ type = 0;
+#ifdef LLC_DEBUG
+ {
+ int i;
+ register struct llc *l = mtod(m, struct llc *);
+
+ printf("fddi_output: sending LLC2 pkt to: ");
+ for (i=0; i<6; i++)
+ printf("%x ", edst[i] & 0xff);
+ printf(" len 0x%x dsap 0x%x ssap 0x%x control 0x%x\n",
+ type & 0xff, l->llc_dsap & 0xff, l->llc_ssap &0xff,
+ l->llc_control & 0xff);
+
+ }
+#endif /* LLC_DEBUG */
+ } break;
+#endif /* LLC */
+
+ case pseudo_AF_HDRCMPLT:
+ {
+ struct ether_header *eh;
+ hdrcmplt = 1;
+ eh = (struct ether_header *)dst->sa_data;
+ (void)memcpy((caddr_t)esrc, (caddr_t)eh->ether_shost, sizeof (esrc));
+ /* FALLTHROUGH */
+ }
+
+ case AF_UNSPEC:
+ {
+ struct ether_header *eh;
+ loop_copy = -1;
+ eh = (struct ether_header *)dst->sa_data;
+ (void)memcpy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst));
+ if (*edst & 1)
+ m->m_flags |= (M_BCAST|M_MCAST);
+ type = eh->ether_type;
+ break;
+ }
+
+ case AF_IMPLINK:
+ {
+ fh = mtod(m, struct fddi_header *);
+ error = EPROTONOSUPPORT;
+ switch (fh->fddi_fc & (FDDIFC_C|FDDIFC_L|FDDIFC_F)) {
+ case FDDIFC_LLC_ASYNC: {
+ /* legal priorities are 0 through 7 */
+ if ((fh->fddi_fc & FDDIFC_Z) > 7)
+ goto bad;
+ break;
+ }
+ case FDDIFC_LLC_SYNC: {
+ /* FDDIFC_Z bits reserved, must be zero */
+ if (fh->fddi_fc & FDDIFC_Z)
+ goto bad;
+ break;
+ }
+ case FDDIFC_SMT: {
+ /* FDDIFC_Z bits must be non zero */
+ if ((fh->fddi_fc & FDDIFC_Z) == 0)
+ goto bad;
+ break;
+ }
+ default: {
+ /* anything else is too dangerous */
+ goto bad;
+ }
+ }
+ error = 0;
+ if (fh->fddi_dhost[0] & 1)
+ m->m_flags |= (M_BCAST|M_MCAST);
+ goto queue_it;
+ }
+ default:
+ printf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit,
+ dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ }
+
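+	/*
+	 * For non-zero types the packet goes out 802.2/SNAP encapsulated:
+	 * dsap/ssap 0xAA, control 0x03 (UI), a zero OUI, then the 16-bit
+	 * Ethernet type.  An IPv4 datagram, for instance, is prefixed with
+	 * AA AA 03 00 00 00 08 00.
+	 */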
+ if (type != 0) {
+ register struct llc *l;
+ M_PREPEND(m, sizeof (struct llc), M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ l = mtod(m, struct llc *);
+ l->llc_control = LLC_UI;
+ l->llc_dsap = l->llc_ssap = LLC_SNAP_LSAP;
+ l->llc_snap.org_code[0] = l->llc_snap.org_code[1] = l->llc_snap.org_code[2] = 0;
+ (void)memcpy((caddr_t) &l->llc_snap.ether_type, (caddr_t) &type,
+ sizeof(u_int16_t));
+ }
+
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ M_PREPEND(m, sizeof (struct fddi_header), M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ fh = mtod(m, struct fddi_header *);
+ fh->fddi_fc = FDDIFC_LLC_ASYNC|FDDIFC_LLC_PRIO4;
+ (void)memcpy((caddr_t)fh->fddi_dhost, (caddr_t)edst, sizeof (edst));
+ queue_it:
+ if (hdrcmplt)
+ (void)memcpy((caddr_t)fh->fddi_shost, (caddr_t)esrc,
+ sizeof(fh->fddi_shost));
+ else
+ (void)memcpy((caddr_t)fh->fddi_shost, (caddr_t)ac->ac_enaddr,
+ sizeof(fh->fddi_shost));
+ /*
+ * If a simplex interface, and the packet is being sent to our
+ * Ethernet address or a broadcast address, loopback a copy.
+ * XXX To make a simplex device behave exactly like a duplex
+ * device, we should copy in the case of sending to our own
+ * ethernet address (thus letting the original actually appear
+ * on the wire). However, we don't do that here for security
+ * reasons and compatibility with the original behavior.
+ */
+ if ((ifp->if_flags & IFF_SIMPLEX) &&
+ (loop_copy != -1)) {
+ if ((m->m_flags & M_BCAST) || loop_copy) {
+ struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
+
+ (void) if_simloop(ifp,
+ n, dst, sizeof(struct fddi_header));
+ } else if (bcmp(fh->fddi_dhost,
+ fh->fddi_shost, sizeof(fh->fddi_shost)) == 0) {
+ (void) if_simloop(ifp,
+ m, dst, sizeof(struct fddi_header));
+ return(0); /* XXX */
+ }
+ }
+
+ s = splimp();
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(s);
+ senderr(ENOBUFS);
+ }
+ ifp->if_obytes += m->m_pkthdr.len;
+ IF_ENQUEUE(&ifp->if_snd, m);
+ if ((ifp->if_flags & IFF_OACTIVE) == 0)
+ (*ifp->if_start)(ifp);
+ splx(s);
+ if (m->m_flags & M_MCAST)
+ ifp->if_omcasts++;
+ return (error);
+
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Process a received FDDI packet;
+ * the packet is in the mbuf chain m without
+ * the fddi header, which is provided separately.
+ */
+void
+fddi_input(ifp, fh, m)
+ struct ifnet *ifp;
+ register struct fddi_header *fh;
+ struct mbuf *m;
+{
+ register struct ifqueue *inq;
+ register struct llc *l;
+ int s;
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+ getmicrotime(&ifp->if_lastchange);
+ ifp->if_ibytes += m->m_pkthdr.len + sizeof (*fh);
+ if (fh->fddi_dhost[0] & 1) {
+ if (bcmp((caddr_t)fddibroadcastaddr, (caddr_t)fh->fddi_dhost,
+ sizeof(fddibroadcastaddr)) == 0)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ ifp->if_imcasts++;
+ } else if ((ifp->if_flags & IFF_PROMISC)
+ && bcmp(((struct arpcom *)ifp)->ac_enaddr, (caddr_t)fh->fddi_dhost,
+ sizeof(fh->fddi_dhost)) != 0) {
+ m_freem(m);
+ return;
+ }
+
+#ifdef M_LINK0
+ /*
+ * If this has a LLC priority of 0, then mark it so upper
+ * layers have a hint that it really came via a FDDI/Ethernet
+ * bridge.
+ */
+ if ((fh->fddi_fc & FDDIFC_LLC_PRIO7) == FDDIFC_LLC_PRIO0)
+ m->m_flags |= M_LINK0;
+#endif
+
+ l = mtod(m, struct llc *);
+ switch (l->llc_dsap) {
+#if defined(INET) || defined(INET6) || defined(NS) || defined(DECNET) || defined(IPX) || defined(NETATALK)
+ case LLC_SNAP_LSAP:
+ {
+ u_int16_t type;
+ if (l->llc_control != LLC_UI || l->llc_ssap != LLC_SNAP_LSAP)
+ goto dropanyway;
+#ifdef NETATALK
+ if (Bcmp(&(l->llc_snap_org_code)[0], at_org_code,
+ sizeof(at_org_code)) == 0 &&
+ ntohs(l->llc_snap_ether_type) == ETHERTYPE_AT) {
+ inq = &atintrq2;
+ m_adj( m, sizeof( struct llc ));
+ schednetisr(NETISR_ATALK);
+ break;
+ }
+
+ if (Bcmp(&(l->llc_snap_org_code)[0], aarp_org_code,
+ sizeof(aarp_org_code)) == 0 &&
+ ntohs(l->llc_snap_ether_type) == ETHERTYPE_AARP) {
+ m_adj( m, sizeof( struct llc ));
+ aarpinput((struct arpcom *)ifp, m); /* XXX */
+ return;
+ }
+#endif /* NETATALK */
+	    if (l->llc_snap.org_code[0] != 0 || l->llc_snap.org_code[1] != 0 ||
+		l->llc_snap.org_code[2] != 0)
+ goto dropanyway;
+ type = ntohs(l->llc_snap.ether_type);
+ m_adj(m, 8);
+ switch (type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if (ipflow_fastforward(m))
+ return;
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+
+ case ETHERTYPE_ARP:
+#if !defined(__bsdi__) || _BSDI_VERSION >= 199401
+ schednetisr(NETISR_ARP);
+ inq = &arpintrq;
+ break;
+#else
+ arpinput((struct arpcom *)ifp, m);
+ return;
+#endif
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ schednetisr(NETISR_IPV6);
+ inq = &ip6intrq;
+ break;
+#endif
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ schednetisr(NETISR_IPX);
+ inq = &ipxintrq;
+ break;
+#endif
+#ifdef NS
+ case ETHERTYPE_NS:
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ break;
+#endif
+#ifdef DECNET
+ case ETHERTYPE_DECNET:
+ schednetisr(NETISR_DECNET);
+ inq = &decnetintrq;
+ break;
+#endif
+#ifdef NETATALK
+ case ETHERTYPE_AT:
+ schednetisr(NETISR_ATALK);
+ inq = &atintrq1;
+ break;
+ case ETHERTYPE_AARP:
+ /* probably this should be done with a NETISR as well */
+ aarpinput((struct arpcom *)ifp, m); /* XXX */
+ return;
+#endif /* NETATALK */
+ default:
+ /* printf("fddi_input: unknown protocol 0x%x\n", type); */
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+ break;
+ }
+#endif /* INET || INET6 || NS || DECNET || IPX || NETATALK */
+#ifdef ISO
+ case LLC_ISO_LSAP:
+ switch (l->llc_control) {
+ case LLC_UI:
+ /* LLC_UI_P forbidden in class 1 service */
+ if ((l->llc_dsap == LLC_ISO_LSAP) &&
+ (l->llc_ssap == LLC_ISO_LSAP)) {
+ /* LSAP for ISO */
+ m->m_data += 3; /* XXX */
+ m->m_len -= 3; /* XXX */
+ m->m_pkthdr.len -= 3; /* XXX */
+ M_PREPEND(m, sizeof *fh, M_DONTWAIT);
+ if (m == 0)
+ return;
+ *mtod(m, struct fddi_header *) = *fh;
+ IFDEBUG(D_ETHER)
+ printf("clnp packet");
+ ENDDEBUG
+ schednetisr(NETISR_ISO);
+ inq = &clnlintrq;
+ break;
+ }
+ goto dropanyway;
+
+ case LLC_XID:
+ case LLC_XID_P:
+ if(m->m_len < 6)
+ goto dropanyway;
+ l->llc_window = 0;
+ l->llc_fid = 9;
+ l->llc_class = 1;
+ l->llc_dsap = l->llc_ssap = 0;
+ /* Fall through to */
+ case LLC_TEST:
+ case LLC_TEST_P:
+ {
+ struct sockaddr sa;
+ register struct ether_header *eh;
+ struct arpcom *ac = (struct arpcom *) ifp;
+ int i;
+ u_char c = l->llc_dsap;
+
+ l->llc_dsap = l->llc_ssap;
+ l->llc_ssap = c;
+ if (m->m_flags & (M_BCAST | M_MCAST))
+ bcopy((caddr_t)ac->ac_enaddr,
+					    (caddr_t)fh->fddi_dhost, 6);
+ sa.sa_family = AF_UNSPEC;
+ sa.sa_len = sizeof(sa);
+ eh = (struct ether_header *)sa.sa_data;
+ for (i = 0; i < 6; i++) {
+ eh->ether_shost[i] = fh->fddi_dhost[i];
+ eh->ether_dhost[i] = fh->fddi_shost[i];
+ }
+ eh->ether_type = 0;
+ ifp->if_output(ifp, m, &sa, NULL);
+ return;
+ }
+ default:
+ m_freem(m);
+ return;
+ }
+ break;
+#endif /* ISO */
+#ifdef LLC
+ case LLC_X25_LSAP:
+ {
+ M_PREPEND(m, sizeof(struct sdl_hdr) , M_DONTWAIT);
+ if (m == 0)
+ return;
+ if ( !sdl_sethdrif(ifp, fh->fddi_shost, LLC_X25_LSAP,
+ fh->fddi_dhost, LLC_X25_LSAP, 6,
+ mtod(m, struct sdl_hdr *)))
+ panic("ETHER cons addr failure");
+ mtod(m, struct sdl_hdr *)->sdlhdr_len = m->m_pkthdr.len - sizeof(struct sdl_hdr);
+#ifdef LLC_DEBUG
+ printf("llc packet\n");
+#endif /* LLC_DEBUG */
+ schednetisr(NETISR_CCITT);
+ inq = &llcintrq;
+ break;
+ }
+#endif /* LLC */
+
+ default:
+ /* printf("fddi_input: unknown dsap 0x%x\n", l->llc_dsap); */
+ ifp->if_noproto++;
+ dropanyway:
+ m_freem(m);
+ return;
+ }
+
+ s = splimp();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ m_freem(m);
+ } else
+ IF_ENQUEUE(inq, m);
+ splx(s);
+}
+/*
+ * Perform common duties while attaching to interface list
+ */
+#ifdef __NetBSD__
+#define ifa_next ifa_list.tqe_next
+#endif
+
+void
+fddi_ifattach(ifp)
+ register struct ifnet *ifp;
+{
+ register struct ifaddr *ifa;
+ register struct sockaddr_dl *sdl;
+
+ ifp->if_type = IFT_FDDI;
+ ifp->if_addrlen = 6;
+ ifp->if_hdrlen = 21;
+ ifp->if_mtu = FDDIMTU;
+ ifp->if_baudrate = 100000000;
+#ifdef IFF_NOTRAILERS
+ ifp->if_flags |= IFF_NOTRAILERS;
+#endif
+#if defined(__FreeBSD__)
+ ifa = ifnet_addrs[ifp->if_index - 1];
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_FDDI;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy(((struct arpcom *)ifp)->ac_enaddr, LLADDR(sdl), ifp->if_addrlen);
+#elif defined(__NetBSD__)
+ LIST_INIT(&((struct arpcom *)ifp)->ac_multiaddrs);
+ for (ifa = ifp->if_addrlist.tqh_first; ifa != NULL; ifa = ifa->ifa_list.tqe_next)
+#else
+ for (ifa = ifp->if_addrlist; ifa != NULL; ifa = ifa->ifa_next)
+#endif
+#if !defined(__FreeBSD__)
+ if ((sdl = (struct sockaddr_dl *)ifa->ifa_addr) &&
+ sdl->sdl_family == AF_LINK) {
+ sdl->sdl_type = IFT_FDDI;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy((caddr_t)((struct arpcom *)ifp)->ac_enaddr,
+ LLADDR(sdl), ifp->if_addrlen);
+ break;
+ }
+#endif
+}
diff --git a/sys/net/if_gif.c b/sys/net/if_gif.c
new file mode 100644
index 0000000..3eaa703
--- /dev/null
+++ b/sys/net/if_gif.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * gif.c
+ */
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/errno.h>
+#include <sys/time.h>
+#include <sys/syslog.h>
+#include <machine/cpu.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/bpf.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/in_gif.h>
+#endif /* INET */
+
+#ifdef INET6
+#ifndef INET
+#include <netinet/in.h>
+#endif
+#include <netinet6/in6_var.h>
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/in6_gif.h>
+#endif /* INET6 */
+
+#include <net/if_gif.h>
+
+#include "gif.h"
+
+#include <net/net_osdep.h>
+
+#if NGIF > 0
+
+void gifattach __P((void *));
+
+/*
+ * gif global variable definitions
+ */
+int ngif = NGIF; /* number of interfaces */
+struct gif_softc *gif = 0;
+
+void
+gifattach(dummy)
+ void *dummy;
+{
+ register struct gif_softc *sc;
+ register int i;
+
+ gif = sc = malloc (ngif * sizeof(struct gif_softc), M_DEVBUF, M_WAIT);
+ bzero(sc, ngif * sizeof(struct gif_softc));
+ for (i = 0; i < ngif; sc++, i++) {
+ sc->gif_if.if_name = "gif";
+ sc->gif_if.if_unit = i;
+ sc->gif_if.if_mtu = GIF_MTU;
+ sc->gif_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
+ sc->gif_if.if_ioctl = gif_ioctl;
+ sc->gif_if.if_output = gif_output;
+ sc->gif_if.if_type = IFT_GIF;
+ sc->gif_if.if_snd.ifq_maxlen = ifqmaxlen;
+ if_attach(&sc->gif_if);
+ bpfattach(&sc->gif_if, DLT_NULL, sizeof(u_int));
+ }
+}
+
+PSEUDO_SET(gifattach, if_gif);
+
+int
+gif_output(ifp, m, dst, rt)
+ struct ifnet *ifp;
+ struct mbuf *m;
+ struct sockaddr *dst;
+ struct rtentry *rt; /* added in net2 */
+{
+ register struct gif_softc *sc = (struct gif_softc*)ifp;
+ int error = 0;
+ static int called = 0; /* XXX: MUTEX */
+	int calllimit = 10;	/* XXX: ad hoc */
+
+ /*
+ * gif may cause infinite recursion calls when misconfigured.
+	 * We'll prevent this by introducing an upper limit.
+	 * XXX: this mechanism may introduce another problem with
+	 * mutual exclusion on the variable CALLED, especially if we
+	 * use kernel threads.
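+	 * (A typical way to trigger the recursion is a route loop: if the
+	 * tunnel destination of gif0 is itself reachable only through
+	 * gif0, every gif_output() hands the encapsulated packet back to
+	 * the IP output path, which delivers it to gif_output() again.)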
+ */
+ if (++called >= calllimit) {
+ log(LOG_NOTICE,
+ "gif_output: recursively called too many times(%d)\n",
+ called);
+ m_freem(m);
+		error = EIO;	/* is there a better errno? */
+ goto end;
+ }
+ getmicrotime(&ifp->if_lastchange);
+ m->m_flags &= ~(M_BCAST|M_MCAST);
+ if (!(ifp->if_flags & IFF_UP) ||
+ sc->gif_psrc == NULL || sc->gif_pdst == NULL) {
+ m_freem(m);
+ error = ENETDOWN;
+ goto end;
+ }
+
+ if (ifp->if_bpf) {
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+		 * try to free it or keep a pointer to it).
+ */
+ struct mbuf m0;
+ u_int af = dst->sa_family;
+
+ m0.m_next = m;
+ m0.m_len = 4;
+ m0.m_data = (char *)&af;
+
+ bpf_mtap(ifp, &m0);
+ }
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+
+ switch (sc->gif_psrc->sa_family) {
+#ifdef INET
+ case AF_INET:
+ error = in_gif_output(ifp, dst->sa_family, m, rt);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ error = in6_gif_output(ifp, dst->sa_family, m, rt);
+ break;
+#endif
+ default:
+ m_freem(m);
+ error = ENETDOWN;
+ }
+
+ end:
+ called = 0; /* reset recursion counter */
+ if (error) ifp->if_oerrors++;
+ return error;
+}
+
+void
+gif_input(m, af, gifp)
+ struct mbuf *m;
+ int af;
+ struct ifnet *gifp;
+{
+ int s, isr;
+ register struct ifqueue *ifq = 0;
+
+ if (gifp == NULL) {
+ /* just in case */
+ m_freem(m);
+ return;
+ }
+
+ if (m->m_pkthdr.rcvif)
+ m->m_pkthdr.rcvif = gifp;
+
+ if (gifp->if_bpf) {
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+		 * try to free it or keep a pointer to it).
+ */
+ struct mbuf m0;
+		u_int af1 = af;	/* use the real address family, not a hardcoded AF_INET6 */
+
+		m0.m_next = m;
+		m0.m_len = 4;
+		m0.m_data = (char *)&af1;
+
+ bpf_mtap(gifp, &m0);
+ }
+
+ /*
+ * Put the packet to the network layer input queue according to the
+ * specified address family.
+ * Note: older versions of gif_input directly called network layer
+ * input functions, e.g. ip6_input, here. We changed the policy to
+ * prevent too many recursive calls of such input functions, which
+ * might cause kernel panic. But the change may introduce another
+	 * might cause a kernel panic.  But the change may introduce another
+	 * problem; if the input queue is full, packets are discarded.
+	 * We believe this rarely occurs, so we changed the policy.  If it
+	 * turns out to happen more often than we expected, we may change
+	 * the policy again.
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ ifq = &ipintrq;
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ifq = &ip6intrq;
+ isr = NETISR_IPV6;
+ break;
+#endif
+ default:
+ m_freem(m);
+ return;
+ }
+
+ s = splimp();
+ if (IF_QFULL(ifq)) {
+ IF_DROP(ifq); /* update statistics */
+ m_freem(m);
+ splx(s);
+ return;
+ }
+ IF_ENQUEUE(ifq, m);
+ /* we need schednetisr since the address family may change */
+ schednetisr(isr);
+ gifp->if_ipackets++;
+ gifp->if_ibytes += m->m_pkthdr.len;
+ splx(s);
+
+ return;
+}
+
+
+int
+gif_ioctl(ifp, cmd, data)
+ struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ struct gif_softc *sc = (struct gif_softc*)ifp;
+ struct ifreq *ifr = (struct ifreq*)data;
+ int error = 0, size;
+ struct sockaddr *sa, *dst, *src;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ break;
+
+ case SIOCSIFDSTADDR:
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCGIFMTU:
+ break;
+ case SIOCSIFMTU:
+ {
+ u_long mtu;
+ mtu = ifr->ifr_mtu;
+ if (mtu < GIF_MTU_MIN || mtu > GIF_MTU_MAX) {
+ return (EINVAL);
+ }
+ ifp->if_mtu = mtu;
+ }
+ break;
+
+ case SIOCSIFPHYADDR:
+#ifdef INET6
+ case SIOCSIFPHYADDR_IN6:
+#endif /* INET6 */
+ switch (ifr->ifr_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ src = (struct sockaddr *)
+ &(((struct in_aliasreq *)data)->ifra_addr);
+ dst = (struct sockaddr *)
+ &(((struct in_aliasreq *)data)->ifra_dstaddr);
+
+ /* only one gif can have dst = INADDR_ANY */
+#define satosaddr(sa) (((struct sockaddr_in *)(sa))->sin_addr.s_addr)
+
+ if (satosaddr(dst) == INADDR_ANY) {
+ int i;
+ struct gif_softc *sc2;
+
+ for (i = 0, sc2 = gif; i < ngif; i++, sc2++) {
+ if (sc2 == sc) continue;
+ if (sc2->gif_pdst &&
+ satosaddr(sc2->gif_pdst)
+ == INADDR_ANY) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ }
+ }
+ size = sizeof(struct sockaddr_in);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ src = (struct sockaddr *)
+ &(((struct in6_aliasreq *)data)->ifra_addr);
+ dst = (struct sockaddr *)
+ &(((struct in6_aliasreq *)data)->ifra_dstaddr);
+
+ /* only one gif can have dst = in6addr_any */
+#define satoin6(sa) (&((struct sockaddr_in6 *)(sa))->sin6_addr)
+
+ if (IN6_IS_ADDR_UNSPECIFIED(satoin6(dst))) {
+ int i;
+ struct gif_softc *sc2;
+
+ for (i = 0, sc2 = gif; i < ngif; i++, sc2++) {
+ if (sc2 == sc) continue;
+ if (sc2->gif_pdst &&
+ IN6_IS_ADDR_UNSPECIFIED(
+ satoin6(sc2->gif_pdst)
+ )) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ }
+ }
+ size = sizeof(struct sockaddr_in6);
+ break;
+#endif /* INET6 */
+ default:
+ error = EPROTOTYPE;
+ goto bad;
+ break;
+ }
+ if (sc->gif_psrc != NULL)
+ free((caddr_t)sc->gif_psrc, M_IFADDR);
+ if (sc->gif_pdst != NULL)
+ free((caddr_t)sc->gif_pdst, M_IFADDR);
+
+ sa = (struct sockaddr *)malloc(size, M_IFADDR, M_WAITOK);
+ bzero((caddr_t)sa, size);
+ bcopy((caddr_t)src, (caddr_t)sa, size);
+ sc->gif_psrc = sa;
+
+ sa = (struct sockaddr *)malloc(size, M_IFADDR, M_WAITOK);
+ bzero((caddr_t)sa, size);
+ bcopy((caddr_t)dst, (caddr_t)sa, size);
+ sc->gif_pdst = sa;
+
+ ifp->if_flags |= (IFF_UP|IFF_RUNNING);
+ if_up(ifp); /* send up RTM_IFINFO */
+
+ break;
+
+ case SIOCGIFPSRCADDR:
+#ifdef INET6
+ case SIOCGIFPSRCADDR_IN6:
+#endif /* INET6 */
+ if (sc->gif_psrc == NULL) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ src = sc->gif_psrc;
+ switch (sc->gif_psrc->sa_family) {
+#ifdef INET
+ case AF_INET:
+ dst = &ifr->ifr_addr;
+ size = sizeof(struct sockaddr_in);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ dst = (struct sockaddr *)
+ &(((struct in6_ifreq *)data)->ifr_addr);
+ size = sizeof(struct sockaddr_in6);
+ break;
+#endif /* INET6 */
+ default:
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ bcopy((caddr_t)src, (caddr_t)dst, size);
+ break;
+
+ case SIOCGIFPDSTADDR:
+#ifdef INET6
+ case SIOCGIFPDSTADDR_IN6:
+#endif /* INET6 */
+ if (sc->gif_pdst == NULL) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ src = sc->gif_pdst;
+ switch (sc->gif_pdst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ dst = &ifr->ifr_addr;
+ size = sizeof(struct sockaddr_in);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ dst = (struct sockaddr *)
+ &(((struct in6_ifreq *)data)->ifr_addr);
+ size = sizeof(struct sockaddr_in6);
+ break;
+#endif /* INET6 */
+ default:
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ bcopy((caddr_t)src, (caddr_t)dst, size);
+ break;
+
+ case SIOCSIFFLAGS:
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ bad:
+ return error;
+}
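+
+/*
+ * A hypothetical userland sketch of the SIOCSIFPHYADDR case above, setting
+ * the outer (physical) IPv4 addresses of gif0; error handling and the usual
+ * headers are omitted, and real tools of the era may do this differently:
+ *
+ *	struct in_aliasreq ifra;
+ *	int s = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	memset(&ifra, 0, sizeof(ifra));
+ *	strncpy(ifra.ifra_name, "gif0", sizeof(ifra.ifra_name));
+ *	ifra.ifra_addr.sin_family = AF_INET;
+ *	ifra.ifra_addr.sin_len = sizeof(struct sockaddr_in);
+ *	ifra.ifra_addr.sin_addr.s_addr = inet_addr("192.0.2.1");
+ *	ifra.ifra_dstaddr.sin_family = AF_INET;
+ *	ifra.ifra_dstaddr.sin_len = sizeof(struct sockaddr_in);
+ *	ifra.ifra_dstaddr.sin_addr.s_addr = inet_addr("198.51.100.1");
+ *	ioctl(s, SIOCSIFPHYADDR, &ifra);
+ */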
+#endif /*NGIF > 0*/
diff --git a/sys/net/if_gif.h b/sys/net/if_gif.h
new file mode 100644
index 0000000..cc26938
--- /dev/null
+++ b/sys/net/if_gif.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * if_gif.h
+ */
+
+#ifndef _NET_IF_GIF_H_
+#define _NET_IF_GIF_H_
+
+struct gif_softc {
+ struct ifnet gif_if; /* common area */
+ struct sockaddr *gif_psrc; /* Physical src addr */
+ struct sockaddr *gif_pdst; /* Physical dst addr */
+ union {
+ struct route gifscr_ro; /* xxx */
+ struct route_in6 gifscr_ro6; /* xxx */
+ } gifsc_gifscr;
+ int gif_flags;
+};
+
+#define gif_ro gifsc_gifscr.gifscr_ro
+#define gif_ro6 gifsc_gifscr.gifscr_ro6
+
+#define GIFF_INUSE 0x1 /* gif is in use */
+
+#define GIF_MTU (1280) /* Default MTU */
+#define GIF_MTU_MIN (1280) /* Minimum MTU */
+#define GIF_MTU_MAX (8192) /* Maximum MTU */
+
+extern int ngif;
+extern struct gif_softc *gif;
+
+/* Prototypes */
+void gif_input __P((struct mbuf *, int, struct ifnet *));
+int gif_output __P((struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct rtentry *));
+int gif_ioctl __P((struct ifnet *, u_long, caddr_t));
+
+#endif /* _NET_IF_GIF_H_ */
diff --git a/sys/net/if_iso88025subr.c b/sys/net/if_iso88025subr.c
new file mode 100644
index 0000000..fae8799
--- /dev/null
+++ b/sys/net/if_iso88025subr.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 1998, Larry Lile
+ * All rights reserved.
+ *
+ * For latest sources and information on this driver, please
+ * go to http://anarchy.stdio.com.
+ *
+ * Questions, comments or suggestions should be directed to
+ * Larry Lile <lile@stdio.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ *
+ * General ISO 802.5 (Token Ring) support routines
+ *
+ */
+
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/if_llc.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+
+#include <net/if_arp.h>
+
+#include <net/iso88025.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/if_ether.h>
+#endif
+
+#include <net/bpf.h>
+
+#include <machine/clock.h>
+#include <machine/md_var.h>
+
+#include <i386/isa/isa_device.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include <sys/kernel.h>
+#include <net/iso88025.h>
+
+void
+iso88025_ifattach(ifp)
+ register struct ifnet *ifp;
+{
+ register struct ifaddr *ifa = NULL;
+ register struct sockaddr_dl *sdl;
+
+ ifp->if_type = IFT_ISO88025;
+ ifp->if_addrlen = 6;
+ ifp->if_hdrlen=18;
+ if (ifp->if_baudrate == 0)
+ ifp->if_baudrate = 16000000; /* 1, 4, or 16Mbit default? */
+ if (ifp->if_mtu == 0)
+ ifp->if_mtu = ISO88025_DEFAULT_MTU;
+
+ ifa = ifnet_addrs[ifp->if_index - 1];
+ if (ifa == 0) {
+ printf("iso88025_ifattach: no lladdr!\n");
+ return;
+ }
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ISO88025;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy(((struct arpcom *)ifp)->ac_enaddr, LLADDR(sdl), ifp->if_addrlen);
+}
+
+int
+iso88025_ioctl(struct ifnet *ifp, int command, caddr_t data)
+{
+ struct ifaddr *ifa = (struct ifaddr *) data;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error = 0;
+
+ switch (command) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifp->if_init(ifp->if_softc); /* before arpwhohas */
+ arp_ifinit((struct arpcom *)ifp, ifa);
+ break;
+#endif
+ default:
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+ break;
+
+ case SIOCGIFADDR:
+ {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) & ifr->ifr_data;
+ bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr,
+ (caddr_t) sa->sa_data, ISO88025_ADDR_LEN);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ if (ifr->ifr_mtu > ISO88025MTU) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+ }
+ return (error);
+}
+
+/*
+ * ISO88025 encapsulation
+ */
+int
+iso88025_output(ifp, m, dst, rt0)
+ register struct ifnet *ifp;
+ struct mbuf *m;
+ struct sockaddr *dst;
+ struct rtentry *rt0;
+{
+ register struct iso88025_header *th;
+ struct iso88025_header gen_th;
+ register struct iso88025_sockaddr_data *sd = (struct iso88025_sockaddr_data *)dst->sa_data;
+ register struct llc *l;
+ register struct sockaddr_dl *sdl = NULL;
+ int s, error = 0, rif_len = 0;
+ u_char edst[6];
+ register struct rtentry *rt;
+ int len = m->m_pkthdr.len, loop_copy = 0;
+ struct arpcom *ac = (struct arpcom *)ifp;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+ senderr(ENETDOWN);
+ rt = rt0;
+ if (rt) {
+ if ((rt->rt_flags & RTF_UP) == 0) {
+ rt0 = rt = rtalloc1(dst, 1, 0UL);
+ if (rt0)
+ rt->rt_refcnt--;
+ else
+ senderr(EHOSTUNREACH);
+ }
+ if (rt->rt_flags & RTF_GATEWAY) {
+ if (rt->rt_gwroute == 0)
+ goto lookup;
+ if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) {
+ rtfree(rt); rt = rt0;
+ lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1,
+ 0UL);
+ if ((rt = rt->rt_gwroute) == 0)
+ senderr(EHOSTUNREACH);
+ }
+ }
+ if (rt->rt_flags & RTF_REJECT)
+ if (rt->rt_rmx.rmx_expire == 0 ||
+ time_second < rt->rt_rmx.rmx_expire)
+ senderr(rt == rt0 ? EHOSTDOWN : EHOSTUNREACH);
+ }
+
+ /* Calculate routing info length based on arp table entry */
+ if (rt && (sdl = (struct sockaddr_dl *)rt->rt_gateway))
+		if (sdl->sdl_rcf != 0)
+ rif_len = (ntohs(sdl->sdl_rcf) & 0x1f00) >> 8;
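+	/*
+	 * The low 5 bits of the first RCF octet give the total RIF length
+	 * in bytes, including the 2-byte RCF itself; e.g. an RCF whose
+	 * host-order value is 0x0620 yields rif_len 6, i.e. two 2-byte
+	 * route designators follow the RCF (assuming the usual token-ring
+	 * source-routing layout).
+	 */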
+
+ /* Generate a generic 802.5 header for the packet */
+ gen_th.ac = 0x10;
+ gen_th.fc = 0x40;
+ memcpy(gen_th.iso88025_shost, ac->ac_enaddr, sizeof(ac->ac_enaddr));
+ if (rif_len) {
+ gen_th.iso88025_shost[0] |= 0x80;
+ if (rif_len > 2) {
+ gen_th.rcf = sdl->sdl_rcf;
+ memcpy(gen_th.rseg, sdl->sdl_route, rif_len - 2);
+ }
+ }
+
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (!arpresolve(ac, rt, m, dst, edst, rt0))
+ return (0); /* if not yet resolved */
+ /* Add LLC and SNAP headers */
+ M_PREPEND(m, 8, M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ l = mtod(m, struct llc *);
+ l->llc_un.type_snap.ether_type = htons(ETHERTYPE_IP);
+ l->llc_dsap = 0xaa;
+ l->llc_ssap = 0xaa;
+ l->llc_un.type_snap.control = 0x3;
+ l->llc_un.type_snap.org_code[0] = 0x0;
+ l->llc_un.type_snap.org_code[1] = 0x0;
+ l->llc_un.type_snap.org_code[2] = 0x0;
+ memcpy(gen_th.iso88025_dhost, edst, sizeof(edst));
+ break;
+#endif
+
+ case AF_UNSPEC:
+	/*
+	 * For AF_UNSPEC, sockaddr.sa_data must contain all of the MAC
+	 * information needed to send the packet.  This allows full MAC,
+	 * LLC, and source-routing control by the caller.  LLC and
+	 * source-routing information must already be in the mbuf
+	 * provided; ac/fc are set from sa_data, which should be an
+	 * iso88025_sockaddr_data structure (see iso88025.h and the
+	 * illustrative sketch after this function).
+	 */
+ loop_copy = -1;
+ sd = (struct iso88025_sockaddr_data *)dst->sa_data;
+ gen_th.ac = sd->ac;
+ gen_th.fc = sd->fc;
+ memcpy(gen_th.iso88025_dhost, sd->ether_dhost, sizeof(sd->ether_dhost));
+ memcpy(gen_th.iso88025_shost, sd->ether_shost, sizeof(sd->ether_shost));
+ rif_len = 0;
+ break;
+
+ default:
+ printf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit,
+ dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ }
+
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+
+ M_PREPEND(m, ISO88025_HDR_LEN + rif_len, M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+
+ /* Copy as much of the generic header as is needed into the mbuf */
+ th = mtod(m, struct iso88025_header *);
+ memcpy(th, &gen_th, ISO88025_HDR_LEN + rif_len);
+
+ /*
+ * If a simplex interface, and the packet is being sent to our
+ * Ethernet address or a broadcast address, loopback a copy.
+ * XXX To make a simplex device behave exactly like a duplex
+ * device, we should copy in the case of sending to our own
+ * ethernet address (thus letting the original actually appear
+ * on the wire). However, we don't do that here for security
+ * reasons and compatibility with the original behavior.
+ */
+ if ((ifp->if_flags & IFF_SIMPLEX) &&
+ (loop_copy != -1)) {
+ if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
+ struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
+ /*printf("iso88025_output: if_simloop broadcast.\n");*/
+ (void) if_simloop(ifp, n, dst, ISO88025_HDR_LEN);
+ } else if (bcmp(th->iso88025_dhost,
+ th->iso88025_shost, ETHER_ADDR_LEN) == 0) {
+ /*printf("iso88025_output: if_simloop to ourselves.\n");*/
+ (void) if_simloop(ifp, m, dst, ISO88025_HDR_LEN);
+ return(0); /* XXX */
+ }
+ }
+
+ s = splimp();
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ if (IF_QFULL(&ifp->if_snd)) {
+ printf("iso88025_output: packet dropped QFULL.\n");
+ IF_DROP(&ifp->if_snd);
+ splx(s);
+ senderr(ENOBUFS);
+ }
+ IF_ENQUEUE(&ifp->if_snd, m);
+ /*printf("iso88025_output: packet queued.\n");*/
+ if ((ifp->if_flags & IFF_OACTIVE) == 0)
+ (*ifp->if_start)(ifp);
+ splx(s);
+ ifp->if_obytes += len + ISO88025_HDR_LEN + 8;
+ if (m->m_flags & M_MCAST)
+ ifp->if_omcasts++;
+ return (error);
+
+bad:
+ if (m)
+ m_freem(m);
+ /*printf("iso88025_output: something went wrong, bailing to bad.\n");*/
+ return (error);
+}
+
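+/*
+ * Illustrative sketch (not part of the original driver): a caller using
+ * the AF_UNSPEC case above supplies the complete MAC framing in
+ * dst->sa_data as a struct iso88025_sockaddr_data.  Assuming placeholder
+ * arrays dst_mac[6] and src_mac[6] hold the addresses, it might look like:
+ *
+ *	struct sockaddr sa;
+ *	struct iso88025_sockaddr_data *sd;
+ *
+ *	bzero(&sa, sizeof(sa));
+ *	sa.sa_family = AF_UNSPEC;
+ *	sd = (struct iso88025_sockaddr_data *)sa.sa_data;
+ *	sd->ac = 0x10;
+ *	sd->fc = 0x40;
+ *	bcopy(dst_mac, sd->ether_dhost, sizeof(sd->ether_dhost));
+ *	bcopy(src_mac, sd->ether_shost, sizeof(sd->ether_shost));
+ *	(void) iso88025_output(ifp, m, &sa, NULL);
+ *
+ * with any LLC and source-routing bytes already present at the front of
+ * the mbuf chain m.
+ */
+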
+/*
+ * ISO 88025 de-encapsulation
+ */
+void
+iso88025_input(ifp, th, m)
+ struct ifnet *ifp;
+ register struct iso88025_header *th;
+ struct mbuf *m;
+{
+ register struct ifqueue *inq;
+ u_short ether_type;
+ int s;
+ register struct llc *l = mtod(m, struct llc *);
+
+ /*printf("iso88025_input: entered.\n");*/
+
+ /*m->m_pkthdr.len = m->m_len = m->m_len - 8;*/ /* Length of LLC header in our case */
+ m->m_pkthdr.len -= 8;
+ m->m_len -= 8;
+ m->m_data += 8; /* Length of LLC header in our case */
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+ ifp->if_ibytes += m->m_pkthdr.len + sizeof (*th);
+ if (th->iso88025_dhost[0] & 1) {
+ if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)th->iso88025_dhost,
+ sizeof(etherbroadcastaddr)) == 0)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ }
+ if (m->m_flags & (M_BCAST|M_MCAST))
+ ifp->if_imcasts++;
+
+ ether_type = ntohs(l->llc_un.type_snap.ether_type);
+
+ /*printf("iso88025_input: source %6D dest %6D ethertype %x\n", th->iso88025_shost, ":", th->iso88025_dhost, ":", ether_type);*/
+
+ switch (ether_type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ /*printf("iso88025_input: IP Packet\n");*/
+ th->iso88025_shost[0] &= ~(0x80); /* Turn off source route bit XXX */
+ if (ipflow_fastforward(m))
+ return;
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+
+ case ETHERTYPE_ARP:
+ /*printf("iso88025_input: ARP Packet\n");*/
+ schednetisr(NETISR_ARP);
+ inq = &arpintrq;
+ break;
+#endif
+ default:
+ m_freem(m);
+ return;
+ }
+
+ s = splimp();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ m_freem(m);
+ printf("iso88025_input: Packet dropped (Queue full).\n");
+ } else
+ IF_ENQUEUE(inq, m);
+ /*printf("iso88025_input: Packet queued.\n");*/
+ splx(s);
+}
diff --git a/sys/net/if_llc.h b/sys/net/if_llc.h
new file mode 100644
index 0000000..8bcd63e
--- /dev/null
+++ b/sys/net/if_llc.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_llc.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_LLC_H_
+#define _NET_IF_LLC_H_
+
+/*
+ * IEEE 802.2 Link Level Control headers, for use in conjunction with
+ * 802.{3,4,5} media access control methods.
+ *
+ * Headers here do not use bit fields due to shortcomings in many
+ * compilers.
+ */
+
+struct llc {
+ u_char llc_dsap;
+ u_char llc_ssap;
+ union {
+ struct {
+ u_char control;
+ u_char format_id;
+ u_char class;
+ u_char window_x2;
+ } type_u;
+ struct {
+ u_char num_snd_x2;
+ u_char num_rcv_x2;
+ } type_i;
+ struct {
+ u_char control;
+ u_char num_rcv_x2;
+ } type_s;
+ struct {
+ u_char control;
+ struct frmrinfo {
+ u_char rej_pdu_0;
+ u_char rej_pdu_1;
+ u_char frmr_control;
+ u_char frmr_control_ext;
+ u_char frmr_cause;
+ } frmrinfo;
+ } type_frmr;
+ struct {
+ u_char control;
+ u_char org_code[3];
+ u_short ether_type;
+ } type_snap;
+ struct {
+ u_char control;
+ u_char control_ext;
+ } type_raw;
+ } llc_un;
+};
+#define llc_control llc_un.type_u.control
+#define llc_control_ext llc_un.type_raw.control_ext
+#define llc_fid llc_un.type_u.format_id
+#define llc_class llc_un.type_u.class
+#define llc_window llc_un.type_u.window_x2
+#define llc_frmrinfo llc_un.type_frmr.frmrinfo
+#define llc_frmr_pdu0 llc_un.type_frmr.frmrinfo.rej_pdu_0
+#define llc_frmr_pdu1 llc_un.type_frmr.frmrinfo.rej_pdu_1
+#define llc_frmr_control llc_un.type_frmr.frmrinfo.frmr_control
+#define llc_frmr_control_ext llc_un.type_frmr.frmrinfo.frmr_control_ext
+#define llc_frmr_cause llc_un.type_frmr.frmrinfo.frmr_cause
+
+/*
+ * Don't use sizeof(struct llc_un) for LLC header sizes
+ */
+#define LLC_ISFRAMELEN 4
+#define LLC_UFRAMELEN 3
+#define LLC_FRMRLEN 7
+
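+/*
+ * Note (illustrative, not part of the original header): struct llc is
+ * sized by its largest union member (the SNAP variant), so
+ * sizeof(struct llc) is typically 8 even though a U-format header
+ * occupies only 3 octets on the wire.  Code that strips an LLC UI
+ * header should therefore use the constants above, e.g.
+ *
+ *	m_adj(m, LLC_UFRAMELEN);	(not sizeof(struct llc))
+ */
+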
+/*
+ * Unnumbered LLC format commands
+ */
+#define LLC_UI 0x3
+#define LLC_UI_P 0x13
+#define LLC_DISC 0x43
+#define LLC_DISC_P 0x53
+#define LLC_UA 0x63
+#define LLC_UA_P 0x73
+#define LLC_TEST 0xe3
+#define LLC_TEST_P 0xf3
+#define LLC_FRMR 0x87
+#define LLC_FRMR_P 0x97
+#define LLC_DM 0x0f
+#define LLC_DM_P 0x1f
+#define LLC_XID 0xaf
+#define LLC_XID_P 0xbf
+#define LLC_SABME 0x6f
+#define LLC_SABME_P 0x7f
+
+/*
+ * Supervisory LLC commands
+ */
+#define LLC_RR 0x01
+#define LLC_RNR 0x05
+#define LLC_REJ 0x09
+
+/*
+ * Info format - dummy only
+ */
+#define LLC_INFO 0x00
+
+/*
+ * ISO PDTR 10178 contains among others
+ */
+#define LLC_X25_LSAP 0x7e
+#define LLC_SNAP_LSAP 0xaa
+#define LLC_ISO_LSAP 0xfe
+
+#endif
diff --git a/sys/net/if_loop.c b/sys/net/if_loop.c
new file mode 100644
index 0000000..a714bf1
--- /dev/null
+++ b/sys/net/if_loop.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_loop.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+/*
+ * Loopback interface driver for protocol testing and timing.
+ */
+#include "loop.h"
+#if NLOOP > 0
+
+#include "opt_atalk.h"
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_ipx.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/bpf.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#endif
+
+#ifdef IPX
+#include <netipx/ipx.h>
+#include <netipx/ipx_if.h>
+#endif
+
+#ifdef INET6
+#ifndef INET
+#include <netinet/in.h>
+#endif
+#include <netinet6/in6_var.h>
+#include <netinet6/ip6.h>
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+#endif
+
+#ifdef ISO
+#include <netiso/iso.h>
+#include <netiso/iso_var.h>
+#endif
+
+#ifdef NETATALK
+#include <netatalk/at.h>
+#include <netatalk/at_var.h>
+#endif /* NETATALK */
+
+int loioctl __P((struct ifnet *, u_long, caddr_t));
+static void lortrequest __P((int, struct rtentry *, struct sockaddr *));
+
+static void loopattach __P((void *));
+PSEUDO_SET(loopattach, if_loop);
+
+int looutput __P((struct ifnet *ifp,
+ struct mbuf *m, struct sockaddr *dst, struct rtentry *rt));
+
+#ifdef TINY_LOMTU
+#define LOMTU (1024+512)
+#elif defined(LARGE_LOMTU)
+#define LOMTU 131072
+#else
+#define LOMTU 16384
+#endif
+
+struct ifnet loif[NLOOP];
+
+/* ARGSUSED */
+static void
+loopattach(dummy)
+ void *dummy;
+{
+ register struct ifnet *ifp;
+ register int i = 0;
+
+ for (ifp = loif; i < NLOOP; ifp++) {
+ ifp->if_name = "lo";
+ ifp->if_unit = i++;
+ ifp->if_mtu = LOMTU;
+ ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
+ ifp->if_ioctl = loioctl;
+ ifp->if_output = looutput;
+ ifp->if_type = IFT_LOOP;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int));
+ }
+}
+
+int
+looutput(ifp, m, dst, rt)
+ struct ifnet *ifp;
+ register struct mbuf *m;
+ struct sockaddr *dst;
+ register struct rtentry *rt;
+{
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("looutput no HDR");
+
+ if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+ m_freem(m);
+ return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
+ rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
+ }
+ /*
+	 * KAME requires the packet to be contiguous in a single mbuf,
+	 * so we make sure of that here.  This kind of code should be
+	 * avoided.
+ * XXX: fails to join if interface MTU > MCLBYTES. jumbogram?
+ */
+ if (m && m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) {
+ struct mbuf *n;
+
+ MGETHDR(n, M_DONTWAIT, MT_HEADER);
+ if (!n)
+ goto contiguousfail;
+ MCLGET(n, M_DONTWAIT);
+ if (! (n->m_flags & M_EXT)) {
+ m_freem(n);
+ goto contiguousfail;
+ }
+
+ m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
+ n->m_pkthdr = m->m_pkthdr;
+ n->m_len = m->m_pkthdr.len;
+ m_freem(m);
+ m = n;
+ }
+ if (0) {
+contiguousfail:
+ printf("looutput: mbuf allocation failed\n");
+ }
+
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+#if 1 /* XXX */
+ switch (dst->sa_family) {
+ case AF_INET:
+ case AF_INET6:
+ case AF_IPX:
+ case AF_NS:
+ case AF_ISO:
+ case AF_APPLETALK:
+ break;
+ default:
+ printf("looutput: af=%d unexpected", dst->sa_family);
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+#endif
+ return(if_simloop(ifp, m, dst, 0));
+}
+
+/*
+ * if_simloop()
+ *
+ * This function is to support software emulation of hardware loopback,
+ * i.e., for interfaces with the IFF_SIMPLEX attribute. Since they can't
+ * hear their own broadcasts, we create a copy of the packet that we
+ * would normally receive via a hardware loopback.
+ *
+ * This function expects the packet to include the media header of length hlen.
+ */
+
+int
+if_simloop(ifp, m, dst, hlen)
+ struct ifnet *ifp;
+ register struct mbuf *m;
+ struct sockaddr *dst;
+ int hlen;
+{
+ int s, isr;
+ register struct ifqueue *ifq = 0;
+
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("if_simloop: no HDR");
+ m->m_pkthdr.rcvif = ifp;
+ /* BPF write needs to be handled specially */
+ if (dst->sa_family == AF_UNSPEC) {
+ dst->sa_family = *(mtod(m, int *));
+ m->m_len -= sizeof(int);
+ m->m_pkthdr.len -= sizeof(int);
+ m->m_data += sizeof(int);
+ }
+
+ if (ifp->if_bpf) {
+ struct mbuf m0, *n = m;
+ u_int af = dst->sa_family;
+
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+	 * try to free it or keep a pointer to it).
+ */
+ m0.m_next = m;
+ m0.m_len = 4;
+ m0.m_data = (char *)&af;
+ n = &m0;
+ bpf_mtap(ifp, n);
+ }
+
+ /* Strip away media header */
+ if (hlen > 0) {
+#ifdef __alpha__
+ /* The alpha doesn't like unaligned data.
+ * We move data down in the first mbuf */
+ if (hlen & 3) {
+ bcopy(m->m_data + hlen, m->m_data, m->m_len - hlen);
+ m->m_len -= hlen;
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len -= hlen;
+ } else
+#endif
+ m_adj(m, hlen);
+ }
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifq = &ipintrq;
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ m->m_flags |= M_LOOP;
+ ifq = &ip6intrq;
+ isr = NETISR_IPV6;
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ ifq = &ipxintrq;
+ isr = NETISR_IPX;
+ break;
+#endif
+#ifdef NS
+ case AF_NS:
+ ifq = &nsintrq;
+ isr = NETISR_NS;
+ break;
+#endif
+#ifdef ISO
+ case AF_ISO:
+ ifq = &clnlintrq;
+ isr = NETISR_ISO;
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK:
+ ifq = &atintrq2;
+ isr = NETISR_ATALK;
+ break;
+#endif /* NETATALK */
+ default:
+ printf("if_simloop: can't handle af=%d\n", dst->sa_family);
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+ s = splimp();
+ if (IF_QFULL(ifq)) {
+ IF_DROP(ifq);
+ m_freem(m);
+ splx(s);
+ return (ENOBUFS);
+ }
+ IF_ENQUEUE(ifq, m);
+ schednetisr(isr);
+ ifp->if_ipackets++;
+ ifp->if_ibytes += m->m_pkthdr.len;
+ splx(s);
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+lortrequest(cmd, rt, sa)
+ int cmd;
+ struct rtentry *rt;
+ struct sockaddr *sa;
+{
+ if (rt) {
+ rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; /* for ISO */
+ /*
+ * For optimal performance, the send and receive buffers
+ * should be at least twice the MTU plus a little more for
+ * overhead.
+ */
+ rt->rt_rmx.rmx_recvpipe =
+ rt->rt_rmx.rmx_sendpipe = 3 * LOMTU;
+ }
+}
+
+/*
+ * Process an ioctl request.
+ */
+/* ARGSUSED */
+int
+loioctl(ifp, cmd, data)
+ register struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ register struct ifaddr *ifa;
+ register struct ifreq *ifr = (struct ifreq *)data;
+ register int error = 0;
+
+ switch (cmd) {
+
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP | IFF_RUNNING;
+ ifa = (struct ifaddr *)data;
+ ifa->ifa_rtrequest = lortrequest;
+ /*
+ * Everything else is done at a higher level.
+ */
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifr == 0) {
+ error = EAFNOSUPPORT; /* XXX */
+ break;
+ }
+ switch (ifr->ifr_addr.sa_family) {
+
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+
+ case SIOCSIFFLAGS:
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+#endif /* NLOOP > 0 */
diff --git a/sys/net/if_media.c b/sys/net/if_media.c
index 1dcf18f..128b924 100644
--- a/sys/net/if_media.c
+++ b/sys/net/if_media.c
@@ -1,4 +1,5 @@
/* $NetBSD: if_media.c,v 1.1 1997/03/17 02:55:15 thorpej Exp $ */
+/* $FreeBSD$ */
/*
* Copyright (c) 1997
@@ -47,14 +48,12 @@
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/errno.h>
-#include <sys/ioctl.h>
#include <sys/socket.h>
+#include <sys/sockio.h>
#include <sys/malloc.h>
#include <net/if.h>
#include <net/if_media.h>
-#include <net/netisr.h>
/*
* Compile-time options:
@@ -63,7 +62,7 @@
* Useful for debugging newly-ported drivers.
*/
-struct ifmedia_entry *ifmedia_match __P((struct ifmedia *ifm,
+static struct ifmedia_entry *ifmedia_match __P((struct ifmedia *ifm,
int flags, int mask));
#ifdef IFMEDIA_DEBUG
@@ -90,6 +89,19 @@ ifmedia_init(ifm, dontcare_mask, change_callback, status_callback)
ifm->ifm_status = status_callback;
}
+void
+ifmedia_removeall(ifm)
+ struct ifmedia *ifm;
+{
+ struct ifmedia_entry *entry;
+
+ for (entry = LIST_FIRST(&ifm->ifm_list); entry;
+ entry = LIST_FIRST(&ifm->ifm_list)) {
+ LIST_REMOVE(entry, ifm_list);
+ free(entry, M_IFADDR);
+ }
+}
+
/*
* Add a media configuration to the list of supported media
* for a specific interface instance.
@@ -234,8 +246,8 @@ ifmedia_ioctl(ifp, ifr, ifm, cmd)
*/
#ifdef IFMEDIA_DEBUG
if (ifmedia_debug) {
- printf("ifmedia_ioctl: switching %s to ",
- ifp->if_xname);
+ printf("ifmedia_ioctl: switching %s%d to ",
+ ifp->if_name, ifp->if_unit);
ifmedia_printword(match->ifm_media);
}
#endif
@@ -328,7 +340,7 @@ ifmedia_ioctl(ifp, ifr, ifm, cmd)
* Find media entry matching a given ifm word.
*
*/
-struct ifmedia_entry *
+static struct ifmedia_entry *
ifmedia_match(ifm, target, mask)
struct ifmedia *ifm;
int target;
diff --git a/sys/net/if_media.h b/sys/net/if_media.h
index 08d0561..1708559 100644
--- a/sys/net/if_media.h
+++ b/sys/net/if_media.h
@@ -1,4 +1,5 @@
/* $NetBSD: if_media.h,v 1.3 1997/03/26 01:19:27 thorpej Exp $ */
+/* $FreeBSD$ */
/*
* Copyright (c) 1997
@@ -49,7 +50,7 @@
* to implement this interface.
*/
-#ifdef _KERNEL
+#ifdef KERNEL
#include <sys/queue.h>
@@ -86,6 +87,9 @@ struct ifmedia {
void ifmedia_init __P((struct ifmedia *ifm, int dontcare_mask,
ifm_change_cb_t change_callback, ifm_stat_cb_t status_callback));
+/* Remove all mediums from a struct ifmedia. */
+void ifmedia_removeall __P((struct ifmedia *ifm));
+
/* Add one supported medium to a struct ifmedia. */
void ifmedia_add __P((struct ifmedia *ifm, int mword, int data, void *aux));
@@ -100,14 +104,13 @@ void ifmedia_set __P((struct ifmedia *ifm, int mword));
int ifmedia_ioctl __P((struct ifnet *ifp, struct ifreq *ifr,
struct ifmedia *ifm, u_long cmd));
-#endif /*_KERNEL */
+#endif /*KERNEL */
/*
* if_media Options word:
* Bits Use
* ---- -------
- * 0-3 Media variant
- * 4 RFU
+ * 0-4 Media variant
* 5-7 Media type
* 8-15 Type specific options
* 16-19 RFU
@@ -127,6 +130,15 @@ int ifmedia_ioctl __P((struct ifnet *ifp, struct ifreq *ifr,
#define IFM_100_T4 8 /* 100BaseT4 - 4 pair cat 3 */
#define IFM_100_VG 9 /* 100VG-AnyLAN */
#define IFM_100_T2 10 /* 100BaseT2 */
+#define IFM_1000_FX 11 /* 1000BaseFX - gigabit over fiber */
+#define IFM_10_STP 12 /* 10BaseT over shielded TP */
+#define IFM_10_FL 13 /* 10baseFL - Fiber */
+#define IFM_1000_SX 14 /* 1000BaseSX Multi-mode Fiber */
+#define IFM_1000_LX 15 /* 1000BaseLX Single-mode Fiber */
+#define IFM_1000_CX 16 /* 1000BaseCX 150ohm STP */
+#define IFM_1000_TX 17 /* 1000BaseTX 4 pair cat 5 */
+#define IFM_homePNA 18 /* HomePNA media for ethernet frames */
+/* note 31 is the max! */
/*
* Token ring
@@ -136,9 +148,14 @@ int ifmedia_ioctl __P((struct ifnet *ifp, struct ifreq *ifr,
#define IFM_TOK_STP16 4 /* Shielded twisted pair 16m - DB9 */
#define IFM_TOK_UTP4 5 /* Unshielded twisted pair 4m - RJ45 */
#define IFM_TOK_UTP16 6 /* Unshielded twisted pair 16m - RJ45 */
+#define IFM_TOK_STP100 7 /* Shielded twisted pair 100m - DB9 */
+#define IFM_TOK_UTP100 8 /* Unshielded twisted pair 100m - RJ45 */
#define IFM_TOK_ETR 0x00000200 /* Early token release */
#define IFM_TOK_SRCRT 0x00000400 /* Enable source routing features */
#define IFM_TOK_ALLR 0x00000800 /* All routes / Single route bcast */
+#define IFM_TOK_DTR 0x00002000 /* Dedicated token ring */
+#define IFM_TOK_CLASSIC 0x00004000 /* Classic token ring */
+#define IFM_TOK_AUTO 0x00008000 /* Automatic Dedicated/Classic token ring */
/*
* FDDI
@@ -170,7 +187,7 @@ int ifmedia_ioctl __P((struct ifnet *ifp, struct ifreq *ifr,
* Masks
*/
#define IFM_NMASK 0x000000e0 /* Network type */
-#define IFM_TMASK 0x0000000f /* Media sub-type */
+#define IFM_TMASK 0x0000001f /* Media sub-type */
#define IFM_IMASK 0xf0000000 /* Instance */
#define IFM_ISHIFT 28 /* Instance shift */
#define IFM_OMASK 0x0000ff00 /* Type specific options */
@@ -185,9 +202,19 @@ int ifmedia_ioctl __P((struct ifnet *ifp, struct ifreq *ifr,
/*
* Macros to extract various bits of information from the media word.
*/
-#define IFM_TYPE(x) ((x) & IFM_NMASK)
-#define IFM_SUBTYPE(x) ((x) & IFM_TMASK)
-#define IFM_INST(x) (((x) & IFM_IMASK) >> IFM_ISHIFT)
+#define IFM_TYPE(x) ((x) & IFM_NMASK)
+#define IFM_SUBTYPE(x) ((x) & IFM_TMASK)
+#define IFM_TYPE_OPTIONS(x) ((x) & IFM_OMASK)
+#define IFM_INST(x) (((x) & IFM_IMASK) >> IFM_ISHIFT)
+#define IFM_OPTIONS(x) ((x) & (IFM_OMASK|IFM_GMASK))
+
+#define IFM_INST_MAX IFM_INST(IFM_IMASK)
+
+/*
+ * Macro to create a media word.
+ */
+#define IFM_MAKEWORD(type, subtype, options, instance) \
+ ((type) | (subtype) | (options) | ((instance) << IFM_ISHIFT))
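+
+/*
+ * Illustrative example (not part of the original header): a full-duplex
+ * 100baseT2 Ethernet media word for instance 0 could be built and taken
+ * apart as
+ *
+ *	int mw = IFM_MAKEWORD(IFM_ETHER, IFM_100_T2, IFM_FDX, 0);
+ *	IFM_TYPE(mw) == IFM_ETHER, IFM_SUBTYPE(mw) == IFM_100_T2,
+ *	IFM_INST(mw) == 0
+ *
+ * IFM_ETHER and IFM_FDX are the standard type and option constants
+ * defined elsewhere in this header.
+ */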
/*
* NetBSD extension not defined in the BSDI API. This is used in various
@@ -217,6 +244,14 @@ struct ifmedia_description {
{ IFM_100_T4, "100baseT4" }, \
{ IFM_100_VG, "100baseVG" }, \
{ IFM_100_T2, "100baseT2" }, \
+ { IFM_1000_FX, "1000baseFX" }, \
+ { IFM_10_STP, "10baseSTP" }, \
+ { IFM_10_FL, "10baseFL" }, \
+ { IFM_1000_SX, "1000baseSX" }, \
+ { IFM_1000_LX, "1000baseLX" }, \
+ { IFM_1000_CX, "1000baseCX" }, \
+ { IFM_1000_TX, "1000baseTX" }, \
+ { IFM_homePNA, "homePNA" }, \
{ 0, NULL }, \
}
@@ -232,6 +267,13 @@ struct ifmedia_description {
{ IFM_100_T4, "100T4" }, \
{ IFM_100_VG, "100VG" }, \
{ IFM_100_T2, "100T2" }, \
+ { IFM_1000_FX, "1000FX" }, \
+ { IFM_10_STP, "10STP" }, \
+ { IFM_10_FL, "10FL" }, \
+	{ IFM_1000_SX, "1000SX" }, \
+	{ IFM_1000_LX, "1000LX" }, \
+	{ IFM_1000_CX, "1000CX" }, \
+	{ IFM_1000_TX, "1000TX" }, \
{ 0, NULL }, \
}
@@ -244,6 +286,8 @@ struct ifmedia_description {
{ IFM_TOK_STP16, "DB9/16Mbit" }, \
{ IFM_TOK_UTP4, "UTP/4Mbit" }, \
{ IFM_TOK_UTP16, "UTP/16Mbit" }, \
+ { IFM_TOK_STP100, "STP/100Mbit" }, \
+ { IFM_TOK_UTP100, "UTP/100Mbit" }, \
{ 0, NULL }, \
}
@@ -252,6 +296,8 @@ struct ifmedia_description {
{ IFM_TOK_STP16, "16STP" }, \
{ IFM_TOK_UTP4, "4UTP" }, \
{ IFM_TOK_UTP16, "16UTP" }, \
+ { IFM_TOK_STP100, "100STP" }, \
+ { IFM_TOK_UTP100, "100UTP" }, \
{ 0, NULL }, \
}
@@ -259,6 +305,9 @@ struct ifmedia_description {
{ IFM_TOK_ETR, "EarlyTokenRelease" }, \
{ IFM_TOK_SRCRT, "SourceRouting" }, \
{ IFM_TOK_ALLR, "AllRoutes" }, \
+ { IFM_TOK_DTR, "Dedicated" }, \
+ { IFM_TOK_CLASSIC,"Classic" }, \
+ { IFM_TOK_AUTO, " " }, \
{ 0, NULL }, \
}
diff --git a/sys/net/if_mib.c b/sys/net/if_mib.c
new file mode 100644
index 0000000..a4489ca
--- /dev/null
+++ b/sys/net/if_mib.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 1996 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_mib.h>
+
+/*
+ * A sysctl(3) MIB for generic interface information. This information
+ * is exported in the net.link.generic branch, which has the following
+ * structure:
+ *
+ * net.link.generic .system - system-wide control variables
+ * and statistics (node)
+ * .ifdata.<ifindex>.general
+ * - what's in `struct ifdata'
+ * plus some other info
+ * .ifdata.<ifindex>.linkspecific
+ * - a link-type-specific data
+ * structure (as might be used
+ * by an SNMP agent
+ * by an SNMP agent)
+ * Perhaps someday we will make addresses accessible via this interface
+ * as well (then there will be four such...). The reason that the
+ * index comes before the last element in the name is because it
+ * seems more orthogonal that way, particularly with the possibility
+ * of other per-interface data living down here as well (e.g., integrated
+ * services stuff).
+ */
+
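+/*
+ * Illustrative userland sketch (not part of the kernel code): the general
+ * ifmib row for a given interface index can be read with sysctl(3) roughly
+ * as follows (error handling omitted):
+ *
+ *	int name[6];
+ *	struct ifmibdata ifmd;
+ *	size_t len = sizeof(ifmd);
+ *
+ *	name[0] = CTL_NET;
+ *	name[1] = PF_LINK;
+ *	name[2] = NETLINK_GENERIC;
+ *	name[3] = IFMIB_IFDATA;
+ *	name[4] = ifindex;		(the interface index, 1-based)
+ *	name[5] = IFDATA_GENERAL;
+ *	sysctl(name, 6, &ifmd, &len, NULL, 0);
+ */
+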
+SYSCTL_DECL(_net_link_generic);
+SYSCTL_NODE(_net_link_generic, IFMIB_SYSTEM, system, CTLFLAG_RW, 0,
+ "Variables global to all interfaces");
+SYSCTL_INT(_net_link_generic_system, IFMIB_IFCOUNT, ifcount, CTLFLAG_RD,
+ &if_index, 0, "Number of configured interfaces");
+
+static int
+sysctl_ifdata SYSCTL_HANDLER_ARGS /* XXX bad syntax! */
+{
+ int *name = (int *)arg1;
+ int error, ifnlen;
+ u_int namelen = arg2;
+ struct ifnet *ifp;
+ char workbuf[64];
+ struct ifmibdata ifmd;
+
+ if (namelen != 2)
+ return EINVAL;
+
+ if (name[0] <= 0 || name[0] > if_index)
+ return ENOENT;
+
+ ifp = ifnet_addrs[name[0] - 1]->ifa_ifp;
+
+ switch(name[1]) {
+ default:
+ return ENOENT;
+
+ case IFDATA_GENERAL:
+ ifnlen = snprintf(workbuf, sizeof(workbuf),
+ "%s%d", ifp->if_name, ifp->if_unit);
+ if(ifnlen + 1 > sizeof ifmd.ifmd_name) {
+ return ENAMETOOLONG;
+ } else {
+ strcpy(ifmd.ifmd_name, workbuf);
+ }
+
+#define COPY(fld) ifmd.ifmd_##fld = ifp->if_##fld
+ COPY(pcount);
+ COPY(flags);
+ COPY(data);
+#undef COPY
+ ifmd.ifmd_snd_len = ifp->if_snd.ifq_len;
+ ifmd.ifmd_snd_maxlen = ifp->if_snd.ifq_maxlen;
+ ifmd.ifmd_snd_drops = ifp->if_snd.ifq_drops;
+
+ error = SYSCTL_OUT(req, &ifmd, sizeof ifmd);
+ if (error || !req->newptr)
+ return error;
+
+ error = SYSCTL_IN(req, &ifmd, sizeof ifmd);
+ if (error)
+ return error;
+
+#define DONTCOPY(fld) ifmd.ifmd_data.ifi_##fld = ifp->if_data.ifi_##fld
+ DONTCOPY(type);
+ DONTCOPY(physical);
+ DONTCOPY(addrlen);
+ DONTCOPY(hdrlen);
+ DONTCOPY(mtu);
+ DONTCOPY(metric);
+ DONTCOPY(baudrate);
+#undef DONTCOPY
+#define COPY(fld) ifp->if_##fld = ifmd.ifmd_##fld
+ COPY(data);
+ ifp->if_snd.ifq_maxlen = ifmd.ifmd_snd_maxlen;
+ ifp->if_snd.ifq_drops = ifmd.ifmd_snd_drops;
+#undef COPY
+ break;
+
+ case IFDATA_LINKSPECIFIC:
+ error = SYSCTL_OUT(req, ifp->if_linkmib, ifp->if_linkmiblen);
+ if (error || !req->newptr)
+ return error;
+
+ error = SYSCTL_IN(req, ifp->if_linkmib, ifp->if_linkmiblen);
+ if (error)
+ return error;
+
+ }
+ return 0;
+}
+
+SYSCTL_NODE(_net_link_generic, IFMIB_IFDATA, ifdata, CTLFLAG_RW,
+ sysctl_ifdata, "Interface table");
+
diff --git a/sys/net/if_mib.h b/sys/net/if_mib.h
new file mode 100644
index 0000000..6c29389
--- /dev/null
+++ b/sys/net/if_mib.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 1996 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_MIB_H
+#define _NET_IF_MIB_H 1
+
+struct ifmibdata {
+ char ifmd_name[IFNAMSIZ]; /* name of interface */
+ int ifmd_pcount; /* number of promiscuous listeners */
+ int ifmd_flags; /* interface flags */
+ int ifmd_snd_len; /* instantaneous length of send queue */
+ int ifmd_snd_maxlen; /* maximum length of send queue */
+ int ifmd_snd_drops; /* number of drops in send queue */
+ int ifmd_filler[4]; /* for future expansion */
+ struct if_data ifmd_data; /* generic information and statistics */
+};
+
+/*
+ * sysctl MIB tags at the net.link.generic level
+ */
+#define IFMIB_SYSTEM 1 /* non-interface-specific */
+#define IFMIB_IFDATA 2 /* per-interface data table */
+
+/*
+ * MIB tags for the various net.link.generic.ifdata tables
+ */
+#define IFDATA_GENERAL 1 /* generic stats for all kinds of ifaces */
+#define IFDATA_LINKSPECIFIC 2 /* specific to the type of interface */
+
+/*
+ * MIB tags at the net.link.generic.system level
+ */
+#define IFMIB_IFCOUNT 1 /* number of interfaces configured */
+
+/*
+ * MIB tags at the net.link level
+ * All of the other values are IFT_* names defined in if_types.h.
+ */
+#define NETLINK_GENERIC 0 /* functions not specific to a type of iface */
+
+/*
+ * The reason why the IFDATA_LINKSPECIFIC stuff is not under the
+ * net.link.<iftype> branches is twofold:
+ * 1) It's easier to code this way, and doesn't require duplication.
+ * 2) The fourth level under net.link.<iftype> is <pf>; that is to say,
+ * the net.link.<iftype> tree instruments the adaptation layers between
+ * <iftype> and a particular protocol family (e.g., net.link.ether.inet
+ * instruments ARP). This does not really leave room for anything else
+ * that needs to have a well-known number.
+ */
+
+/*
+ * Link-specific MIB structures for various link types.
+ */
+
+/* For IFT_ETHER, IFT_ISO88023, and IFT_STARLAN, as used by RFC 1650 */
+struct ifmib_iso_8802_3 {
+ u_int32_t dot3StatsAlignmentErrors;
+ u_int32_t dot3StatsFCSErrors;
+ u_int32_t dot3StatsSingleCollisionFrames;
+ u_int32_t dot3StatsMultipleCollisionFrames;
+ u_int32_t dot3StatsSQETestErrors;
+ u_int32_t dot3StatsDeferredTransmissions;
+ u_int32_t dot3StatsLateCollisions;
+ u_int32_t dot3StatsExcessiveCollisions;
+ u_int32_t dot3StatsInternalMacTransmitErrors;
+ u_int32_t dot3StatsCarrierSenseErrors;
+ u_int32_t dot3StatsFrameTooLongs;
+ u_int32_t dot3StatsInternalMacReceiveErrors;
+ u_int32_t dot3StatsEtherChipSet;
+ /* Matt Thomas wants this one, not included in RFC 1650: */
+ u_int32_t dot3StatsMissedFrames;
+
+ u_int32_t dot3StatsCollFrequencies[16]; /* NB: index origin */
+
+ u_int32_t dot3Compliance;
+#define DOT3COMPLIANCE_STATS 1
+#define DOT3COMPLIANCE_COLLS 2
+};
+
+/*
+ * Chipset identifiers are normally part of the vendor's enterprise MIB.
+ * However, we don't want to be trying to represent arbitrary-length
+ * OBJECT IDENTIFIERs here (ick!), and the right value is not necessarily
+ * obvious to the driver implementor. So, we define our own identification
+ * mechanism here, and let the agent writer deal with the translation.
+ */
+#define DOT3CHIPSET_VENDOR(x) ((x) >> 16)
+#define DOT3CHIPSET_PART(x) ((x) & 0xffff)
+#define DOT3CHIPSET(v,p) (((v) << 16) + ((p) & 0xffff))
+
+/* Driver writers! Add your vendors here! */
+enum dot3Vendors {
+ dot3VendorAMD = 1,
+ dot3VendorIntel = 2,
+ dot3VendorNational = 4,
+ dot3VendorFujitsu = 5,
+ dot3VendorDigital = 6,
+ dot3VendorWesternDigital = 7
+};
+
+/* Driver writers! Add your chipsets here! */
+enum {
+ dot3ChipSetAMD7990 = 1,
+ dot3ChipSetAMD79900 = 2,
+ dot3ChipSetAMD79C940 = 3
+};
+
+enum {
+ dot3ChipSetIntel82586 = 1,
+ dot3ChipSetIntel82596 = 2,
+ dot3ChipSetIntel82557 = 3
+};
+
+enum {
+ dot3ChipSetNational8390 = 1,
+ dot3ChipSetNationalSonic = 2
+};
+
+enum {
+ dot3ChipSetFujitsu86950 = 1
+};
+
+enum {
+ dot3ChipSetDigitalDC21040 = 1,
+ dot3ChipSetDigitalDC21140 = 2,
+ dot3ChipSetDigitalDC21041 = 3,
+ dot3ChipSetDigitalDC21140A = 4,
+ dot3ChipSetDigitalDC21142 = 5
+};
+
+enum {
+ dot3ChipSetWesternDigital83C690 = 1,
+ dot3ChipSetWesternDigital83C790 = 2
+};
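+
+/*
+ * Illustrative example (not part of the original header): a driver for an
+ * Intel 82557 chip would report its chipset in the link-specific MIB as
+ *
+ *	mib->dot3StatsEtherChipSet =
+ *	    DOT3CHIPSET(dot3VendorIntel, dot3ChipSetIntel82557);
+ *
+ * where `mib' points at the driver's struct ifmib_iso_8802_3.
+ */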
+/* END of Ethernet-link MIB stuff */
+
+/*
+ * Put other types of interface MIBs here, or in interface-specific
+ * header files if convenient ones already exist.
+ */
+#endif /* _NET_IF_MIB_H */
diff --git a/sys/net/if_ppp.c b/sys/net/if_ppp.c
new file mode 100644
index 0000000..1db8237
--- /dev/null
+++ b/sys/net/if_ppp.c
@@ -0,0 +1,1576 @@
+/*
+ * if_ppp.c - Point-to-Point Protocol (PPP) Asynchronous driver.
+ *
+ * Copyright (c) 1989 Carnegie Mellon University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by Carnegie Mellon University. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Drew D. Perkins
+ * Carnegie Mellon University
+ * 4910 Forbes Ave.
+ * Pittsburgh, PA 15213
+ * (412) 268-8576
+ * ddp@andrew.cmu.edu
+ *
+ * Based on:
+ * @(#)if_sl.c 7.6.1.2 (Berkeley) 2/15/89
+ *
+ * Copyright (c) 1987 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Serial Line interface
+ *
+ * Rick Adams
+ * Center for Seismic Studies
+ * 1300 N 17th Street, Suite 1450
+ * Arlington, Virginia 22209
+ * (703)276-7900
+ * rick@seismo.ARPA
+ * seismo!rick
+ *
+ * Pounded on heavily by Chris Torek (chris@mimsy.umd.edu, umcp-cs!chris).
+ * Converted to 4.3BSD Beta by Chris Torek.
+ * Other changes made at Berkeley, based in part on code by Kirk Smith.
+ *
+ * Converted to 4.3BSD+ 386BSD by Brad Parker (brad@cayman.com)
+ * Added VJ tcp header compression; more unified ioctls
+ *
+ * Extensively modified by Paul Mackerras (paulus@cs.anu.edu.au).
+ * Cleaned up a lot of the mbuf-related code to fix bugs that
+ * caused system crashes and packet corruption. Changed pppstart
+ * so that it doesn't just give up with a collision if the whole
+ * packet doesn't fit in the output ring buffer.
+ *
+ * Added priority queueing for interactive IP packets, following
+ * the model of if_sl.c, plus hooks for bpf.
+ * Paul Mackerras (paulus@cs.anu.edu.au).
+ */
+
+/* $FreeBSD$ */
+/* from if_sl.c,v 1.11 84/10/04 12:54:47 rick Exp */
+/* from NetBSD: if_ppp.c,v 1.15.2.2 1994/07/28 05:17:58 cgd Exp */
+
+#include "ppp.h"
+#if NPPP > 0
+
+#include "opt_inet.h"
+#include "opt_ipx.h"
+#include "opt_ppp.h"
+
+#ifdef INET
+#define VJC
+#endif
+#define PPP_COMPRESS
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/filio.h>
+#include <sys/sockio.h>
+#include <sys/kernel.h>
+#include <sys/time.h>
+#include <sys/malloc.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+#include <net/bpf.h>
+
+#if INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#endif
+
+#if IPX
+#include <netipx/ipx.h>
+#include <netipx/ipx_if.h>
+#endif
+
+#ifdef VJC
+#include <net/slcompress.h>
+#endif
+
+#include <net/if_ppp.h>
+#include <net/if_pppvar.h>
+
+/* minimise diffs */
+#ifndef splsoftnet
+#define splsoftnet splnet
+#endif
+
+#ifdef PPP_COMPRESS
+#define PACKETPTR struct mbuf *
+#include <net/ppp_comp.h>
+#endif
+
+struct ppp_softc ppp_softc[NPPP];
+
+/* XXX layering violation */
+extern void pppasyncattach __P((void *));
+
+static void pppattach __P((void *));
+PSEUDO_SET(pppattach, if_ppp);
+
+static int pppsioctl __P((struct ifnet *ifp, u_long cmd, caddr_t data));
+static void pppintr __P((void));
+
+static void ppp_requeue __P((struct ppp_softc *));
+static void ppp_ccp __P((struct ppp_softc *, struct mbuf *m, int rcvd));
+static void ppp_ccp_closed __P((struct ppp_softc *));
+static void ppp_inproc __P((struct ppp_softc *, struct mbuf *));
+static void pppdumpm __P((struct mbuf *m0));
+
+/*
+ * Some useful mbuf macros not in mbuf.h.
+ */
+#define M_IS_CLUSTER(m) ((m)->m_flags & M_EXT)
+
+#define M_DATASTART(m) \
+ (M_IS_CLUSTER(m) ? (m)->m_ext.ext_buf : \
+ (m)->m_flags & M_PKTHDR ? (m)->m_pktdat : (m)->m_dat)
+
+#define M_DATASIZE(m) \
+ (M_IS_CLUSTER(m) ? (m)->m_ext.ext_size : \
+ (m)->m_flags & M_PKTHDR ? MHLEN: MLEN)
+
+/*
+ * We steal two bits in the mbuf m_flags, to mark high-priority packets
+ * for output, and received packets following lost/corrupted packets.
+ */
+#define M_HIGHPRI 0x2000 /* output packet for sc_fastq */
+#define M_ERRMARK 0x4000 /* steal a bit in mbuf m_flags */
+
+
+#ifdef PPP_COMPRESS
+/*
+ * List of compressors we know about.
+ * We leave some space so maybe we can modload compressors.
+ */
+
+extern struct compressor ppp_bsd_compress;
+extern struct compressor ppp_deflate, ppp_deflate_draft;
+
+static struct compressor *ppp_compressors[8] = {
+#if DO_BSD_COMPRESS && defined(PPP_BSDCOMP)
+ &ppp_bsd_compress,
+#endif
+#if DO_DEFLATE && defined(PPP_DEFLATE)
+ &ppp_deflate,
+ &ppp_deflate_draft,
+#endif
+ NULL
+};
+#endif /* PPP_COMPRESS */
+
+/*
+ * Called from boot code to establish ppp interfaces.
+ */
+static void
+pppattach(dummy)
+ void *dummy;
+{
+ register struct ppp_softc *sc;
+ register int i = 0;
+
+ for (sc = ppp_softc; i < NPPP; sc++) {
+ sc->sc_if.if_name = "ppp";
+ sc->sc_if.if_unit = i++;
+ sc->sc_if.if_mtu = PPP_MTU;
+ sc->sc_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
+ sc->sc_if.if_type = IFT_PPP;
+ sc->sc_if.if_hdrlen = PPP_HDRLEN;
+ sc->sc_if.if_ioctl = pppsioctl;
+ sc->sc_if.if_output = pppoutput;
+ sc->sc_if.if_snd.ifq_maxlen = IFQ_MAXLEN;
+ sc->sc_inq.ifq_maxlen = IFQ_MAXLEN;
+ sc->sc_fastq.ifq_maxlen = IFQ_MAXLEN;
+ sc->sc_rawq.ifq_maxlen = IFQ_MAXLEN;
+ if_attach(&sc->sc_if);
+ bpfattach(&sc->sc_if, DLT_PPP, PPP_HDRLEN);
+ }
+ register_netisr(NETISR_PPP, pppintr);
+ /*
+ * XXX layering violation - if_ppp can work over any lower level
+ * transport that cares to attach to it.
+ */
+ pppasyncattach(dummy);
+}
+
+/*
+ * Allocate a ppp interface unit and initialize it.
+ */
+struct ppp_softc *
+pppalloc(pid)
+ pid_t pid;
+{
+ int nppp, i;
+ struct ppp_softc *sc;
+
+ for (nppp = 0, sc = ppp_softc; nppp < NPPP; nppp++, sc++)
+ if (sc->sc_xfer == pid) {
+ sc->sc_xfer = 0;
+ return sc;
+ }
+ for (nppp = 0, sc = ppp_softc; nppp < NPPP; nppp++, sc++)
+ if (sc->sc_devp == NULL)
+ break;
+ if (nppp >= NPPP)
+ return NULL;
+
+ sc->sc_flags = 0;
+ sc->sc_mru = PPP_MRU;
+ sc->sc_relinq = NULL;
+ bzero((char *)&sc->sc_stats, sizeof(sc->sc_stats));
+#ifdef VJC
+ MALLOC(sc->sc_comp, struct slcompress *, sizeof(struct slcompress),
+ M_DEVBUF, M_NOWAIT);
+ if (sc->sc_comp)
+ sl_compress_init(sc->sc_comp, -1);
+#endif
+#ifdef PPP_COMPRESS
+ sc->sc_xc_state = NULL;
+ sc->sc_rc_state = NULL;
+#endif /* PPP_COMPRESS */
+ for (i = 0; i < NUM_NP; ++i)
+ sc->sc_npmode[i] = NPMODE_ERROR;
+ sc->sc_npqueue = NULL;
+ sc->sc_npqtail = &sc->sc_npqueue;
+ sc->sc_last_sent = sc->sc_last_recv = time_second;
+
+ return sc;
+}
+
+/*
+ * Deallocate a ppp unit. Must be called at splsoftnet or higher.
+ */
+void
+pppdealloc(sc)
+ struct ppp_softc *sc;
+{
+ struct mbuf *m;
+
+ if_down(&sc->sc_if);
+ sc->sc_if.if_flags &= ~(IFF_UP|IFF_RUNNING);
+ getmicrotime(&sc->sc_if.if_lastchange);
+ sc->sc_devp = NULL;
+ sc->sc_xfer = 0;
+ for (;;) {
+ IF_DEQUEUE(&sc->sc_rawq, m);
+ if (m == NULL)
+ break;
+ m_freem(m);
+ }
+ for (;;) {
+ IF_DEQUEUE(&sc->sc_inq, m);
+ if (m == NULL)
+ break;
+ m_freem(m);
+ }
+ for (;;) {
+ IF_DEQUEUE(&sc->sc_fastq, m);
+ if (m == NULL)
+ break;
+ m_freem(m);
+ }
+ while ((m = sc->sc_npqueue) != NULL) {
+ sc->sc_npqueue = m->m_nextpkt;
+ m_freem(m);
+ }
+#ifdef PPP_COMPRESS
+ ppp_ccp_closed(sc);
+ sc->sc_xc_state = NULL;
+ sc->sc_rc_state = NULL;
+#endif /* PPP_COMPRESS */
+#ifdef PPP_FILTER
+ if (sc->sc_pass_filt.bf_insns != 0) {
+ FREE(sc->sc_pass_filt.bf_insns, M_DEVBUF);
+ sc->sc_pass_filt.bf_insns = 0;
+ sc->sc_pass_filt.bf_len = 0;
+ }
+ if (sc->sc_active_filt.bf_insns != 0) {
+ FREE(sc->sc_active_filt.bf_insns, M_DEVBUF);
+ sc->sc_active_filt.bf_insns = 0;
+ sc->sc_active_filt.bf_len = 0;
+ }
+#endif /* PPP_FILTER */
+#ifdef VJC
+ if (sc->sc_comp != 0) {
+ FREE(sc->sc_comp, M_DEVBUF);
+ sc->sc_comp = 0;
+ }
+#endif
+}
+
+/*
+ * Ioctl routine for generic ppp devices.
+ */
+int
+pppioctl(sc, cmd, data, flag, p)
+ struct ppp_softc *sc;
+ u_long cmd;
+ caddr_t data;
+ int flag;
+ struct proc *p;
+{
+ int s, error, flags, mru, nb, npx;
+ struct ppp_option_data *odp;
+ struct compressor **cp;
+ struct npioctl *npi;
+ time_t t;
+#ifdef PPP_FILTER
+ struct bpf_program *bp, *nbp;
+ struct bpf_insn *newcode, *oldcode;
+ int newcodelen;
+#endif /* PPP_FILTER */
+#ifdef PPP_COMPRESS
+ u_char ccp_option[CCP_MAX_OPTION_LENGTH];
+#endif
+
+ switch (cmd) {
+ case FIONREAD:
+ *(int *)data = sc->sc_inq.ifq_len;
+ break;
+
+ case PPPIOCGUNIT:
+ *(int *)data = sc->sc_if.if_unit;
+ break;
+
+ case PPPIOCGFLAGS:
+ *(u_int *)data = sc->sc_flags;
+ break;
+
+ case PPPIOCSFLAGS:
+ if ((error = suser(p)) != 0)
+ return (error);
+ flags = *(int *)data & SC_MASK;
+ s = splsoftnet();
+#ifdef PPP_COMPRESS
+ if (sc->sc_flags & SC_CCP_OPEN && !(flags & SC_CCP_OPEN))
+ ppp_ccp_closed(sc);
+#endif
+ splimp();
+ sc->sc_flags = (sc->sc_flags & ~SC_MASK) | flags;
+ splx(s);
+ break;
+
+ case PPPIOCSMRU:
+ if ((error = suser(p)) != 0)
+ return (error);
+ mru = *(int *)data;
+ if (mru >= PPP_MRU && mru <= PPP_MAXMRU)
+ sc->sc_mru = mru;
+ break;
+
+ case PPPIOCGMRU:
+ *(int *)data = sc->sc_mru;
+ break;
+
+#ifdef VJC
+ case PPPIOCSMAXCID:
+ if ((error = suser(p)) != 0)
+ return (error);
+ if (sc->sc_comp) {
+ s = splsoftnet();
+ sl_compress_init(sc->sc_comp, *(int *)data);
+ splx(s);
+ }
+ break;
+#endif
+
+ case PPPIOCXFERUNIT:
+ if ((error = suser(p)) != 0)
+ return (error);
+ sc->sc_xfer = p->p_pid;
+ break;
+
+#ifdef PPP_COMPRESS
+ case PPPIOCSCOMPRESS:
+ if ((error = suser(p)) != 0)
+ return (error);
+ odp = (struct ppp_option_data *) data;
+ nb = odp->length;
+ if (nb > sizeof(ccp_option))
+ nb = sizeof(ccp_option);
+ if ((error = copyin(odp->ptr, ccp_option, nb)) != 0)
+ return (error);
+ if (ccp_option[1] < 2) /* preliminary check on the length byte */
+ return (EINVAL);
+ for (cp = ppp_compressors; *cp != NULL; ++cp)
+ if ((*cp)->compress_proto == ccp_option[0]) {
+ /*
+ * Found a handler for the protocol - try to allocate
+ * a compressor or decompressor.
+ */
+ error = 0;
+ if (odp->transmit) {
+ s = splsoftnet();
+ if (sc->sc_xc_state != NULL)
+ (*sc->sc_xcomp->comp_free)(sc->sc_xc_state);
+ sc->sc_xcomp = *cp;
+ sc->sc_xc_state = (*cp)->comp_alloc(ccp_option, nb);
+ if (sc->sc_xc_state == NULL) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: comp_alloc failed\n",
+ sc->sc_if.if_unit);
+ error = ENOBUFS;
+ }
+ splimp();
+ sc->sc_flags &= ~SC_COMP_RUN;
+ splx(s);
+ } else {
+ s = splsoftnet();
+ if (sc->sc_rc_state != NULL)
+ (*sc->sc_rcomp->decomp_free)(sc->sc_rc_state);
+ sc->sc_rcomp = *cp;
+ sc->sc_rc_state = (*cp)->decomp_alloc(ccp_option, nb);
+ if (sc->sc_rc_state == NULL) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: decomp_alloc failed\n",
+ sc->sc_if.if_unit);
+ error = ENOBUFS;
+ }
+ splimp();
+ sc->sc_flags &= ~SC_DECOMP_RUN;
+ splx(s);
+ }
+ return (error);
+ }
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: no compressor for [%x %x %x], %x\n",
+ sc->sc_if.if_unit, ccp_option[0], ccp_option[1],
+ ccp_option[2], nb);
+ return (EINVAL); /* no handler found */
+#endif /* PPP_COMPRESS */
+
+ case PPPIOCGNPMODE:
+ case PPPIOCSNPMODE:
+ npi = (struct npioctl *) data;
+ switch (npi->protocol) {
+ case PPP_IP:
+ npx = NP_IP;
+ break;
+ default:
+ return EINVAL;
+ }
+ if (cmd == PPPIOCGNPMODE) {
+ npi->mode = sc->sc_npmode[npx];
+ } else {
+ if ((error = suser(p)) != 0)
+ return (error);
+ if (npi->mode != sc->sc_npmode[npx]) {
+ s = splsoftnet();
+ sc->sc_npmode[npx] = npi->mode;
+ if (npi->mode != NPMODE_QUEUE) {
+ ppp_requeue(sc);
+ (*sc->sc_start)(sc);
+ }
+ splx(s);
+ }
+ }
+ break;
+
+ case PPPIOCGIDLE:
+ s = splsoftnet();
+ t = time_second;
+ ((struct ppp_idle *)data)->xmit_idle = t - sc->sc_last_sent;
+ ((struct ppp_idle *)data)->recv_idle = t - sc->sc_last_recv;
+ splx(s);
+ break;
+
+#ifdef PPP_FILTER
+ case PPPIOCSPASS:
+ case PPPIOCSACTIVE:
+ nbp = (struct bpf_program *) data;
+ if ((unsigned) nbp->bf_len > BPF_MAXINSNS)
+ return EINVAL;
+ newcodelen = nbp->bf_len * sizeof(struct bpf_insn);
+ if (newcodelen != 0) {
+ MALLOC(newcode, struct bpf_insn *, newcodelen, M_DEVBUF, M_WAITOK);
+ if (newcode == 0) {
+ return EINVAL; /* or sumpin */
+ }
+ if ((error = copyin((caddr_t)nbp->bf_insns, (caddr_t)newcode,
+ newcodelen)) != 0) {
+ FREE(newcode, M_DEVBUF);
+ return error;
+ }
+ if (!bpf_validate(newcode, nbp->bf_len)) {
+ FREE(newcode, M_DEVBUF);
+ return EINVAL;
+ }
+ } else
+ newcode = 0;
+ bp = (cmd == PPPIOCSPASS)? &sc->sc_pass_filt: &sc->sc_active_filt;
+ oldcode = bp->bf_insns;
+ s = splimp();
+ bp->bf_len = nbp->bf_len;
+ bp->bf_insns = newcode;
+ splx(s);
+ if (oldcode != 0)
+ FREE(oldcode, M_DEVBUF);
+ break;
+#endif
+
+ default:
+ return (ENOIOCTL);
+ }
+ return (0);
+}
+
+/*
+ * Process an ioctl request to the ppp network interface.
+ */
+static int
+pppsioctl(ifp, cmd, data)
+ register struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ struct proc *p = curproc; /* XXX */
+ register struct ppp_softc *sc = &ppp_softc[ifp->if_unit];
+ register struct ifaddr *ifa = (struct ifaddr *)data;
+ register struct ifreq *ifr = (struct ifreq *)data;
+ struct ppp_stats *psp;
+#ifdef PPP_COMPRESS
+ struct ppp_comp_stats *pcp;
+#endif
+ int s = splimp(), error = 0;
+
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ if ((ifp->if_flags & IFF_RUNNING) == 0)
+ ifp->if_flags &= ~IFF_UP;
+ break;
+
+ case SIOCSIFADDR:
+ case SIOCAIFADDR:
+ switch(ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ break;
+#endif
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCSIFDSTADDR:
+ switch(ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ break;
+#endif
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCSIFMTU:
+ if ((error = suser(p)) != 0)
+ break;
+ if (ifr->ifr_mtu > PPP_MAXMTU)
+ error = EINVAL;
+ else {
+ sc->sc_if.if_mtu = ifr->ifr_mtu;
+ if (sc->sc_setmtu)
+ (*sc->sc_setmtu)(sc);
+ }
+ break;
+
+ case SIOCGIFMTU:
+ ifr->ifr_mtu = sc->sc_if.if_mtu;
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifr == 0) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ switch(ifr->ifr_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCGPPPSTATS:
+ psp = &((struct ifpppstatsreq *) data)->stats;
+ bzero(psp, sizeof(*psp));
+ psp->p = sc->sc_stats;
+#if defined(VJC) && !defined(SL_NO_STATS)
+ if (sc->sc_comp) {
+ psp->vj.vjs_packets = sc->sc_comp->sls_packets;
+ psp->vj.vjs_compressed = sc->sc_comp->sls_compressed;
+ psp->vj.vjs_searches = sc->sc_comp->sls_searches;
+ psp->vj.vjs_misses = sc->sc_comp->sls_misses;
+ psp->vj.vjs_uncompressedin = sc->sc_comp->sls_uncompressedin;
+ psp->vj.vjs_compressedin = sc->sc_comp->sls_compressedin;
+ psp->vj.vjs_errorin = sc->sc_comp->sls_errorin;
+ psp->vj.vjs_tossed = sc->sc_comp->sls_tossed;
+ }
+#endif /* VJC */
+ break;
+
+#ifdef PPP_COMPRESS
+ case SIOCGPPPCSTATS:
+ pcp = &((struct ifpppcstatsreq *) data)->stats;
+ bzero(pcp, sizeof(*pcp));
+ if (sc->sc_xc_state != NULL)
+ (*sc->sc_xcomp->comp_stat)(sc->sc_xc_state, &pcp->c);
+ if (sc->sc_rc_state != NULL)
+ (*sc->sc_rcomp->decomp_stat)(sc->sc_rc_state, &pcp->d);
+ break;
+#endif /* PPP_COMPRESS */
+
+ default:
+ error = ENOTTY;
+ }
+ splx(s);
+ return (error);
+}
+
+/*
+ * Queue a packet. Start transmission if not active.
+ * Packet is placed in Information field of PPP frame.
+ * Called at splnet as the if->if_output handler.
+ * Called at splnet from pppwrite().
+ */
+int
+pppoutput(ifp, m0, dst, rtp)
+ struct ifnet *ifp;
+ struct mbuf *m0;
+ struct sockaddr *dst;
+ struct rtentry *rtp;
+{
+ register struct ppp_softc *sc = &ppp_softc[ifp->if_unit];
+ int protocol, address, control;
+ u_char *cp;
+ int s, error;
+ struct ip *ip;
+ struct ifqueue *ifq;
+ enum NPmode mode;
+ int len;
+ struct mbuf *m;
+
+ if (sc->sc_devp == NULL || (ifp->if_flags & IFF_RUNNING) == 0
+ || ((ifp->if_flags & IFF_UP) == 0 && dst->sa_family != AF_UNSPEC)) {
+ error = ENETDOWN; /* sort of */
+ goto bad;
+ }
+
+ /*
+ * Compute PPP header.
+ */
+ m0->m_flags &= ~M_HIGHPRI;
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ address = PPP_ALLSTATIONS;
+ control = PPP_UI;
+ protocol = PPP_IP;
+ mode = sc->sc_npmode[NP_IP];
+
+ /*
+ * If this packet has the "low delay" bit set in the IP header,
+ * put it on the fastq instead.
+ */
+ ip = mtod(m0, struct ip *);
+ if (ip->ip_tos & IPTOS_LOWDELAY)
+ m0->m_flags |= M_HIGHPRI;
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ /*
+	 * This is pretty bogus...  We don't have an ipxcp module in pppd
+ * yet to configure the link parameters. Sigh. I guess a
+ * manual ifconfig would do.... -Peter
+ */
+ address = PPP_ALLSTATIONS;
+ control = PPP_UI;
+ protocol = PPP_IPX;
+ mode = NPMODE_PASS;
+ break;
+#endif
+ case AF_UNSPEC:
+ address = PPP_ADDRESS(dst->sa_data);
+ control = PPP_CONTROL(dst->sa_data);
+ protocol = PPP_PROTOCOL(dst->sa_data);
+ mode = NPMODE_PASS;
+ break;
+ default:
+ printf("ppp%d: af%d not supported\n", ifp->if_unit, dst->sa_family);
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+
+ /*
+ * Drop this packet, or return an error, if necessary.
+ */
+ if (mode == NPMODE_ERROR) {
+ error = ENETDOWN;
+ goto bad;
+ }
+ if (mode == NPMODE_DROP) {
+ error = 0;
+ goto bad;
+ }
+
+ /*
+ * Add PPP header. If no space in first mbuf, allocate another.
+ * (This assumes M_LEADINGSPACE is always 0 for a cluster mbuf.)
+ */
+ if (M_LEADINGSPACE(m0) < PPP_HDRLEN) {
+ m0 = m_prepend(m0, PPP_HDRLEN, M_DONTWAIT);
+ if (m0 == 0) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ m0->m_len = 0;
+ } else
+ m0->m_data -= PPP_HDRLEN;
+
+ cp = mtod(m0, u_char *);
+ *cp++ = address;
+ *cp++ = control;
+ *cp++ = protocol >> 8;
+ *cp++ = protocol & 0xff;
+ m0->m_len += PPP_HDRLEN;
+
+ len = 0;
+ for (m = m0; m != 0; m = m->m_next)
+ len += m->m_len;
+
+ if (sc->sc_flags & SC_LOG_OUTPKT) {
+ printf("ppp%d output: ", ifp->if_unit);
+ pppdumpm(m0);
+ }
+
+ if ((protocol & 0x8000) == 0) {
+#ifdef PPP_FILTER
+ /*
+ * Apply the pass and active filters to the packet,
+ * but only if it is a data packet.
+ */
+ *mtod(m0, u_char *) = 1; /* indicates outbound */
+ if (sc->sc_pass_filt.bf_insns != 0
+ && bpf_filter(sc->sc_pass_filt.bf_insns, (u_char *) m0,
+ len, 0) == 0) {
+ error = 0; /* drop this packet */
+ goto bad;
+ }
+
+ /*
+ * Update the time we sent the most recent packet.
+ */
+ if (sc->sc_active_filt.bf_insns == 0
+ || bpf_filter(sc->sc_active_filt.bf_insns, (u_char *) m0, len, 0))
+ sc->sc_last_sent = time_second;
+
+ *mtod(m0, u_char *) = address;
+#else
+ /*
+ * Update the time we sent the most recent data packet.
+ */
+ sc->sc_last_sent = time_second;
+#endif /* PPP_FILTER */
+ }
+
+ /*
+ * See if bpf wants to look at the packet.
+ */
+ if (ifp->if_bpf)
+ bpf_mtap(ifp, m0);
+
+ /*
+ * Put the packet on the appropriate queue.
+ */
+ s = splsoftnet(); /* redundant */
+ if (mode == NPMODE_QUEUE) {
+ /* XXX we should limit the number of packets on this queue */
+ *sc->sc_npqtail = m0;
+ m0->m_nextpkt = NULL;
+ sc->sc_npqtail = &m0->m_nextpkt;
+ } else {
+ /* fastq and if_snd are emptied at spl[soft]net now */
+ ifq = (m0->m_flags & M_HIGHPRI)? &sc->sc_fastq: &ifp->if_snd;
+ if (IF_QFULL(ifq) && dst->sa_family != AF_UNSPEC) {
+ IF_DROP(ifq);
+ splx(s);
+ sc->sc_if.if_oerrors++;
+ sc->sc_stats.ppp_oerrors++;
+ error = ENOBUFS;
+ goto bad;
+ }
+ IF_ENQUEUE(ifq, m0);
+ (*sc->sc_start)(sc);
+ }
+ getmicrotime(&ifp->if_lastchange);
+ ifp->if_opackets++;
+ ifp->if_obytes += len;
+
+ splx(s);
+ return (0);
+
+bad:
+ m_freem(m0);
+ return (error);
+}
+
+/*
+ * After a change in the NPmode for some NP, move packets from the
+ * npqueue to the send queue or the fast queue as appropriate.
+ * Should be called at spl[soft]net.
+ */
+static void
+ppp_requeue(sc)
+ struct ppp_softc *sc;
+{
+ struct mbuf *m, **mpp;
+ struct ifqueue *ifq;
+ enum NPmode mode;
+
+ for (mpp = &sc->sc_npqueue; (m = *mpp) != NULL; ) {
+ switch (PPP_PROTOCOL(mtod(m, u_char *))) {
+ case PPP_IP:
+ mode = sc->sc_npmode[NP_IP];
+ break;
+ default:
+ mode = NPMODE_PASS;
+ }
+
+ switch (mode) {
+ case NPMODE_PASS:
+ /*
+ * This packet can now go on one of the queues to be sent.
+ */
+ *mpp = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ ifq = (m->m_flags & M_HIGHPRI)? &sc->sc_fastq: &sc->sc_if.if_snd;
+ if (IF_QFULL(ifq)) {
+ IF_DROP(ifq);
+ sc->sc_if.if_oerrors++;
+ sc->sc_stats.ppp_oerrors++;
+ } else
+ IF_ENQUEUE(ifq, m);
+ break;
+
+ case NPMODE_DROP:
+ case NPMODE_ERROR:
+ *mpp = m->m_nextpkt;
+ m_freem(m);
+ break;
+
+ case NPMODE_QUEUE:
+ mpp = &m->m_nextpkt;
+ break;
+ }
+ }
+ sc->sc_npqtail = mpp;
+}
+
+/*
+ * Transmitter has finished outputting some stuff;
+ * remember to call sc->sc_start later at splsoftnet.
+ */
+void
+ppp_restart(sc)
+ struct ppp_softc *sc;
+{
+ int s = splimp();
+
+ sc->sc_flags &= ~SC_TBUSY;
+ schednetisr(NETISR_PPP);
+ splx(s);
+}
+
+
+/*
+ * Get a packet to send. This procedure is intended to be called at
+ * splsoftnet, since it may involve time-consuming operations such as
+ * applying VJ compression, packet compression, address/control and/or
+ * protocol field compression to the packet.
+ */
+struct mbuf *
+ppp_dequeue(sc)
+ struct ppp_softc *sc;
+{
+ struct mbuf *m, *mp;
+ u_char *cp;
+ int address, control, protocol;
+
+ /*
+ * Grab a packet to send: first try the fast queue, then the
+ * normal queue.
+ */
+ IF_DEQUEUE(&sc->sc_fastq, m);
+ if (m == NULL)
+ IF_DEQUEUE(&sc->sc_if.if_snd, m);
+ if (m == NULL)
+ return NULL;
+
+ ++sc->sc_stats.ppp_opackets;
+
+ /*
+ * Extract the ppp header of the new packet.
+ * The ppp header will be in one mbuf.
+ */
+ cp = mtod(m, u_char *);
+ address = PPP_ADDRESS(cp);
+ control = PPP_CONTROL(cp);
+ protocol = PPP_PROTOCOL(cp);
+
+ switch (protocol) {
+ case PPP_IP:
+#ifdef VJC
+ /*
+ * If the packet is a TCP/IP packet, see if we can compress it.
+ */
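+	/*
+	 * (VJ header compression, cf. RFC 1144, replaces most of the
+	 * 40-byte TCP/IP header with a small delta encoding; the PPP
+	 * protocol field is rewritten to PPP_VJC_COMP or PPP_VJC_UNCOMP
+	 * below accordingly.)
+	 */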
+ if ((sc->sc_flags & SC_COMP_TCP) && sc->sc_comp != NULL) {
+ struct ip *ip;
+ int type;
+
+ mp = m;
+ ip = (struct ip *) (cp + PPP_HDRLEN);
+ if (mp->m_len <= PPP_HDRLEN) {
+ mp = mp->m_next;
+ if (mp == NULL)
+ break;
+ ip = mtod(mp, struct ip *);
+ }
+ /* this code assumes the IP/TCP header is in one non-shared mbuf */
+ if (ip->ip_p == IPPROTO_TCP) {
+ type = sl_compress_tcp(mp, ip, sc->sc_comp,
+ !(sc->sc_flags & SC_NO_TCP_CCID));
+ switch (type) {
+ case TYPE_UNCOMPRESSED_TCP:
+ protocol = PPP_VJC_UNCOMP;
+ break;
+ case TYPE_COMPRESSED_TCP:
+ protocol = PPP_VJC_COMP;
+ cp = mtod(m, u_char *);
+ cp[0] = address; /* header has moved */
+ cp[1] = control;
+ cp[2] = 0;
+ break;
+ }
+ cp[3] = protocol; /* update protocol in PPP header */
+ }
+ }
+#endif /* VJC */
+ break;
+
+#ifdef PPP_COMPRESS
+ case PPP_CCP:
+ ppp_ccp(sc, m, 0);
+ break;
+#endif /* PPP_COMPRESS */
+ }
+
+#ifdef PPP_COMPRESS
+ if (protocol != PPP_LCP && protocol != PPP_CCP
+ && sc->sc_xc_state && (sc->sc_flags & SC_COMP_RUN)) {
+ struct mbuf *mcomp = NULL;
+ int slen, clen;
+
+ slen = 0;
+ for (mp = m; mp != NULL; mp = mp->m_next)
+ slen += mp->m_len;
+ clen = (*sc->sc_xcomp->compress)
+ (sc->sc_xc_state, &mcomp, m, slen, sc->sc_if.if_mtu + PPP_HDRLEN);
+ if (mcomp != NULL) {
+ if (sc->sc_flags & SC_CCP_UP) {
+ /* Send the compressed packet instead of the original. */
+ m_freem(m);
+ m = mcomp;
+ cp = mtod(m, u_char *);
+ protocol = cp[3];
+ } else {
+ /* Can't transmit compressed packets until CCP is up. */
+ m_freem(mcomp);
+ }
+ }
+ }
+#endif /* PPP_COMPRESS */
+
+ /*
+ * Compress the address/control and protocol, if possible.
+ */
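+    /*
+     * Address/control compression drops the leading ff 03 bytes;
+     * protocol field compression drops the high (zero) byte of the
+     * protocol, e.g. sending "21" instead of "00 21" for IP.
+     */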
+ if (sc->sc_flags & SC_COMP_AC && address == PPP_ALLSTATIONS &&
+ control == PPP_UI && protocol != PPP_ALLSTATIONS &&
+ protocol != PPP_LCP) {
+ /* can compress address/control */
+ m->m_data += 2;
+ m->m_len -= 2;
+ }
+ if (sc->sc_flags & SC_COMP_PROT && protocol < 0xFF) {
+ /* can compress protocol */
+ if (mtod(m, u_char *) == cp) {
+ cp[2] = cp[1]; /* move address/control up */
+ cp[1] = cp[0];
+ }
+ ++m->m_data;
+ --m->m_len;
+ }
+
+ return m;
+}
+
+/*
+ * Software interrupt routine, called at spl[soft]net.
+ */
+static void
+pppintr()
+{
+ struct ppp_softc *sc;
+ int i, s;
+ struct mbuf *m;
+
+ sc = ppp_softc;
+ for (i = 0; i < NPPP; ++i, ++sc) {
+ s = splimp();
+ if (!(sc->sc_flags & SC_TBUSY)
+ && (sc->sc_if.if_snd.ifq_head || sc->sc_fastq.ifq_head)) {
+ sc->sc_flags |= SC_TBUSY;
+ splx(s);
+ (*sc->sc_start)(sc);
+ } else
+ splx(s);
+ for (;;) {
+ s = splimp();
+ IF_DEQUEUE(&sc->sc_rawq, m);
+ splx(s);
+ if (m == NULL)
+ break;
+ ppp_inproc(sc, m);
+ }
+ }
+}
+
+#ifdef PPP_COMPRESS
+/*
+ * Handle a CCP packet. `rcvd' is 1 if the packet was received,
+ * 0 if it is about to be transmitted.
+ */
+static void
+ppp_ccp(sc, m, rcvd)
+ struct ppp_softc *sc;
+ struct mbuf *m;
+ int rcvd;
+{
+ u_char *dp, *ep;
+ struct mbuf *mp;
+ int slen, s;
+
+ /*
+ * Get a pointer to the data after the PPP header.
+ */
+ if (m->m_len <= PPP_HDRLEN) {
+ mp = m->m_next;
+ if (mp == NULL)
+ return;
+	dp = mtod(mp, u_char *);
+ } else {
+ mp = m;
+ dp = mtod(mp, u_char *) + PPP_HDRLEN;
+ }
+
+ ep = mtod(mp, u_char *) + mp->m_len;
+ if (dp + CCP_HDRLEN > ep)
+ return;
+ slen = CCP_LENGTH(dp);
+ if (dp + slen > ep) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("if_ppp/ccp: not enough data in mbuf (%p+%x > %p+%x)\n",
+ dp, slen, mtod(mp, u_char *), mp->m_len);
+ return;
+ }
+
+ switch (CCP_CODE(dp)) {
+ case CCP_CONFREQ:
+ case CCP_TERMREQ:
+ case CCP_TERMACK:
+ /* CCP must be going down - disable compression */
+ if (sc->sc_flags & SC_CCP_UP) {
+ s = splimp();
+ sc->sc_flags &= ~(SC_CCP_UP | SC_COMP_RUN | SC_DECOMP_RUN);
+ splx(s);
+ }
+ break;
+
+ case CCP_CONFACK:
+ if (sc->sc_flags & SC_CCP_OPEN && !(sc->sc_flags & SC_CCP_UP)
+ && slen >= CCP_HDRLEN + CCP_OPT_MINLEN
+ && slen >= CCP_OPT_LENGTH(dp + CCP_HDRLEN) + CCP_HDRLEN) {
+ if (!rcvd) {
+ /* we're agreeing to send compressed packets. */
+ if (sc->sc_xc_state != NULL
+ && (*sc->sc_xcomp->comp_init)
+ (sc->sc_xc_state, dp + CCP_HDRLEN, slen - CCP_HDRLEN,
+ sc->sc_if.if_unit, 0, sc->sc_flags & SC_DEBUG)) {
+ s = splimp();
+ sc->sc_flags |= SC_COMP_RUN;
+ splx(s);
+ }
+ } else {
+ /* peer is agreeing to send compressed packets. */
+ if (sc->sc_rc_state != NULL
+ && (*sc->sc_rcomp->decomp_init)
+ (sc->sc_rc_state, dp + CCP_HDRLEN, slen - CCP_HDRLEN,
+ sc->sc_if.if_unit, 0, sc->sc_mru,
+ sc->sc_flags & SC_DEBUG)) {
+ s = splimp();
+ sc->sc_flags |= SC_DECOMP_RUN;
+ sc->sc_flags &= ~(SC_DC_ERROR | SC_DC_FERROR);
+ splx(s);
+ }
+ }
+ }
+ break;
+
+ case CCP_RESETACK:
+ if (sc->sc_flags & SC_CCP_UP) {
+ if (!rcvd) {
+ if (sc->sc_xc_state && (sc->sc_flags & SC_COMP_RUN))
+ (*sc->sc_xcomp->comp_reset)(sc->sc_xc_state);
+ } else {
+ if (sc->sc_rc_state && (sc->sc_flags & SC_DECOMP_RUN)) {
+ (*sc->sc_rcomp->decomp_reset)(sc->sc_rc_state);
+ s = splimp();
+ sc->sc_flags &= ~SC_DC_ERROR;
+ splx(s);
+ }
+ }
+ }
+ break;
+ }
+}
+
+/*
+ * CCP is down; free (de)compressor state if necessary.
+ */
+static void
+ppp_ccp_closed(sc)
+ struct ppp_softc *sc;
+{
+ if (sc->sc_xc_state) {
+ (*sc->sc_xcomp->comp_free)(sc->sc_xc_state);
+ sc->sc_xc_state = NULL;
+ }
+ if (sc->sc_rc_state) {
+ (*sc->sc_rcomp->decomp_free)(sc->sc_rc_state);
+ sc->sc_rc_state = NULL;
+ }
+}
+#endif /* PPP_COMPRESS */
+
+/*
+ * PPP packet input routine.
+ * The caller has checked and removed the FCS and has inserted
+ * the address/control bytes and the protocol high byte if they
+ * were omitted.
+ */
+void
+ppppktin(sc, m, lost)
+ struct ppp_softc *sc;
+ struct mbuf *m;
+ int lost;
+{
+ int s = splimp();
+
+ if (lost)
+ m->m_flags |= M_ERRMARK;
+ IF_ENQUEUE(&sc->sc_rawq, m);
+ schednetisr(NETISR_PPP);
+ splx(s);
+}
+
+/*
+ * Process a received PPP packet, doing decompression as necessary.
+ * Should be called at splsoftnet.
+ */
+#define COMPTYPE(proto) ((proto) == PPP_VJC_COMP? TYPE_COMPRESSED_TCP: \
+ TYPE_UNCOMPRESSED_TCP)
+
+static void
+ppp_inproc(sc, m)
+ struct ppp_softc *sc;
+ struct mbuf *m;
+{
+ struct ifnet *ifp = &sc->sc_if;
+ struct ifqueue *inq;
+ int s, ilen = 0, xlen, proto, rv;
+ u_char *cp, adrs, ctrl;
+ struct mbuf *mp, *dmp = NULL;
+ u_char *iphdr;
+ u_int hlen;
+
+ sc->sc_stats.ppp_ipackets++;
+
+ if (sc->sc_flags & SC_LOG_INPKT) {
+ ilen = 0;
+ for (mp = m; mp != NULL; mp = mp->m_next)
+ ilen += mp->m_len;
+ printf("ppp%d: got %d bytes\n", ifp->if_unit, ilen);
+ pppdumpm(m);
+ }
+
+ cp = mtod(m, u_char *);
+ adrs = PPP_ADDRESS(cp);
+ ctrl = PPP_CONTROL(cp);
+ proto = PPP_PROTOCOL(cp);
+
+ if (m->m_flags & M_ERRMARK) {
+ m->m_flags &= ~M_ERRMARK;
+ s = splimp();
+ sc->sc_flags |= SC_VJ_RESET;
+ splx(s);
+ }
+
+#ifdef PPP_COMPRESS
+ /*
+ * Decompress this packet if necessary, update the receiver's
+ * dictionary, or take appropriate action on a CCP packet.
+ */
+ if (proto == PPP_COMP && sc->sc_rc_state && (sc->sc_flags & SC_DECOMP_RUN)
+ && !(sc->sc_flags & SC_DC_ERROR) && !(sc->sc_flags & SC_DC_FERROR)) {
+ /* decompress this packet */
+ rv = (*sc->sc_rcomp->decompress)(sc->sc_rc_state, m, &dmp);
+ if (rv == DECOMP_OK) {
+ m_freem(m);
+ if (dmp == NULL) {
+ /* no error, but no decompressed packet produced */
+ return;
+ }
+ m = dmp;
+ cp = mtod(m, u_char *);
+ proto = PPP_PROTOCOL(cp);
+
+ } else {
+ /*
+ * An error has occurred in decompression.
+ * Pass the compressed packet up to pppd, which may take
+ * CCP down or issue a Reset-Req.
+ */
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: decompress failed %d\n", ifp->if_unit, rv);
+ s = splimp();
+ sc->sc_flags |= SC_VJ_RESET;
+ if (rv == DECOMP_ERROR)
+ sc->sc_flags |= SC_DC_ERROR;
+ else
+ sc->sc_flags |= SC_DC_FERROR;
+ splx(s);
+ }
+
+ } else {
+ if (sc->sc_rc_state && (sc->sc_flags & SC_DECOMP_RUN)) {
+ (*sc->sc_rcomp->incomp)(sc->sc_rc_state, m);
+ }
+ if (proto == PPP_CCP) {
+ ppp_ccp(sc, m, 1);
+ }
+ }
+#endif
+
+ ilen = 0;
+ for (mp = m; mp != NULL; mp = mp->m_next)
+ ilen += mp->m_len;
+
+#ifdef VJC
+ if (sc->sc_flags & SC_VJ_RESET) {
+ /*
+ * If we've missed a packet, we must toss subsequent compressed
+ * packets which don't have an explicit connection ID.
+ */
+ if (sc->sc_comp)
+ sl_uncompress_tcp(NULL, 0, TYPE_ERROR, sc->sc_comp);
+ s = splimp();
+ sc->sc_flags &= ~SC_VJ_RESET;
+ splx(s);
+ }
+
+ /*
+ * See if we have a VJ-compressed packet to uncompress.
+ */
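+    /*
+     * sl_uncompress_tcp_core() consumes the compressed header (xlen
+     * bytes of input) and hands back a pointer to the regenerated
+     * TCP/IP header (iphdr, hlen bytes); for the compressed case that
+     * header is copied into a fresh mbuf ahead of the remaining data
+     * below.
+     */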
+ if (proto == PPP_VJC_COMP) {
+ if ((sc->sc_flags & SC_REJ_COMP_TCP) || sc->sc_comp == 0)
+ goto bad;
+
+ xlen = sl_uncompress_tcp_core(cp + PPP_HDRLEN, m->m_len - PPP_HDRLEN,
+ ilen - PPP_HDRLEN, TYPE_COMPRESSED_TCP,
+ sc->sc_comp, &iphdr, &hlen);
+
+ if (xlen <= 0) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: VJ uncompress failed on type comp\n",
+ ifp->if_unit);
+ goto bad;
+ }
+
+ /* Copy the PPP and IP headers into a new mbuf. */
+ MGETHDR(mp, M_DONTWAIT, MT_DATA);
+ if (mp == NULL)
+ goto bad;
+ mp->m_len = 0;
+ mp->m_next = NULL;
+ if (hlen + PPP_HDRLEN > MHLEN) {
+ MCLGET(mp, M_DONTWAIT);
+ if (M_TRAILINGSPACE(mp) < hlen + PPP_HDRLEN) {
+ m_freem(mp);
+ goto bad; /* lose if big headers and no clusters */
+ }
+ }
+ cp = mtod(mp, u_char *);
+ cp[0] = adrs;
+ cp[1] = ctrl;
+ cp[2] = 0;
+ cp[3] = PPP_IP;
+ proto = PPP_IP;
+ bcopy(iphdr, cp + PPP_HDRLEN, hlen);
+ mp->m_len = hlen + PPP_HDRLEN;
+
+ /*
+ * Trim the PPP and VJ headers off the old mbuf
+ * and stick the new and old mbufs together.
+ */
+ m->m_data += PPP_HDRLEN + xlen;
+ m->m_len -= PPP_HDRLEN + xlen;
+ if (m->m_len <= M_TRAILINGSPACE(mp)) {
+ bcopy(mtod(m, u_char *), mtod(mp, u_char *) + mp->m_len, m->m_len);
+ mp->m_len += m->m_len;
+ MFREE(m, mp->m_next);
+ } else
+ mp->m_next = m;
+ m = mp;
+ ilen += hlen - xlen;
+
+ } else if (proto == PPP_VJC_UNCOMP) {
+ if ((sc->sc_flags & SC_REJ_COMP_TCP) || sc->sc_comp == 0)
+ goto bad;
+
+ xlen = sl_uncompress_tcp_core(cp + PPP_HDRLEN, m->m_len - PPP_HDRLEN,
+ ilen - PPP_HDRLEN, TYPE_UNCOMPRESSED_TCP,
+ sc->sc_comp, &iphdr, &hlen);
+
+ if (xlen < 0) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: VJ uncompress failed on type uncomp\n",
+ ifp->if_unit);
+ goto bad;
+ }
+
+ proto = PPP_IP;
+ cp[3] = PPP_IP;
+ }
+#endif /* VJC */
+
+ /*
+ * If the packet will fit in a header mbuf, don't waste a
+ * whole cluster on it.
+ */
+ if (ilen <= MHLEN && M_IS_CLUSTER(m)) {
+ MGETHDR(mp, M_DONTWAIT, MT_DATA);
+ if (mp != NULL) {
+ m_copydata(m, 0, ilen, mtod(mp, caddr_t));
+ m_freem(m);
+ m = mp;
+ m->m_len = ilen;
+ }
+ }
+ m->m_pkthdr.len = ilen;
+ m->m_pkthdr.rcvif = ifp;
+
+ if ((proto & 0x8000) == 0) {
+#ifdef PPP_FILTER
+ /*
+ * See whether we want to pass this packet, and
+ * if it counts as link activity.
+ */
+ adrs = *mtod(m, u_char *); /* save address field */
+ *mtod(m, u_char *) = 0; /* indicate inbound */
+ if (sc->sc_pass_filt.bf_insns != 0
+ && bpf_filter(sc->sc_pass_filt.bf_insns, (u_char *) m,
+ ilen, 0) == 0) {
+ /* drop this packet */
+ m_freem(m);
+ return;
+ }
+ if (sc->sc_active_filt.bf_insns == 0
+ || bpf_filter(sc->sc_active_filt.bf_insns, (u_char *) m, ilen, 0))
+ sc->sc_last_recv = time_second;
+
+ *mtod(m, u_char *) = adrs;
+#else
+ /*
+ * Record the time that we received this packet.
+ */
+ sc->sc_last_recv = time_second;
+#endif /* PPP_FILTER */
+ }
+
+ /* See if bpf wants to look at the packet. */
+ if (sc->sc_if.if_bpf)
+ bpf_mtap(&sc->sc_if, m);
+
+ rv = 0;
+ switch (proto) {
+#ifdef INET
+ case PPP_IP:
+ /*
+ * IP packet - take off the ppp header and pass it up to IP.
+ */
+ if ((ifp->if_flags & IFF_UP) == 0
+ || sc->sc_npmode[NP_IP] != NPMODE_PASS) {
+ /* interface is down - drop the packet. */
+ m_freem(m);
+ return;
+ }
+ m->m_pkthdr.len -= PPP_HDRLEN;
+ m->m_data += PPP_HDRLEN;
+ m->m_len -= PPP_HDRLEN;
+ if (ipflow_fastforward(m)) {
+ sc->sc_last_recv = time_second;
+ return;
+ }
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ sc->sc_last_recv = time_second; /* update time of last pkt rcvd */
+ break;
+#endif
+#ifdef IPX
+ case PPP_IPX:
+ /*
+ * IPX packet - take off the ppp header and pass it up to IPX.
+ */
+ if ((sc->sc_if.if_flags & IFF_UP) == 0
+ /* XXX: || sc->sc_npmode[NP_IPX] != NPMODE_PASS*/) {
+ /* interface is down - drop the packet. */
+ m_freem(m);
+ return;
+ }
+ m->m_pkthdr.len -= PPP_HDRLEN;
+ m->m_data += PPP_HDRLEN;
+ m->m_len -= PPP_HDRLEN;
+ schednetisr(NETISR_IPX);
+ inq = &ipxintrq;
+ sc->sc_last_recv = time_second; /* update time of last pkt rcvd */
+ break;
+#endif
+
+ default:
+ /*
+ * Some other protocol - place on input queue for read().
+ */
+ inq = &sc->sc_inq;
+ rv = 1;
+ break;
+ }
+
+ /*
+ * Put the packet on the appropriate input queue.
+ */
+ s = splimp();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ splx(s);
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: input queue full\n", ifp->if_unit);
+ ifp->if_iqdrops++;
+ goto bad;
+ }
+ IF_ENQUEUE(inq, m);
+ splx(s);
+ ifp->if_ipackets++;
+ ifp->if_ibytes += ilen;
+ getmicrotime(&ifp->if_lastchange);
+
+ if (rv)
+ (*sc->sc_ctlp)(sc);
+
+ return;
+
+ bad:
+ m_freem(m);
+ sc->sc_if.if_ierrors++;
+ sc->sc_stats.ppp_ierrors++;
+}
+
+#define MAX_DUMP_BYTES 128
+
+static void
+pppdumpm(m0)
+ struct mbuf *m0;
+{
+ char buf[3*MAX_DUMP_BYTES+4];
+ char *bp = buf;
+ struct mbuf *m;
+
+ for (m = m0; m; m = m->m_next) {
+ int l = m->m_len;
+ u_char *rptr = (u_char *)m->m_data;
+
+ while (l--) {
+ if (bp > buf + sizeof(buf) - 4)
+ goto done;
+ *bp++ = hex2ascii(*rptr >> 4);
+ *bp++ = hex2ascii(*rptr++ & 0xf);
+ }
+
+ if (m->m_next) {
+ if (bp > buf + sizeof(buf) - 3)
+ goto done;
+ *bp++ = '|';
+ } else
+ *bp++ = ' ';
+ }
+done:
+ if (m)
+ *bp++ = '>';
+ *bp = 0;
+ printf("%s\n", buf);
+}
+
+#endif /* NPPP > 0 */
diff --git a/sys/net/if_ppp.h b/sys/net/if_ppp.h
new file mode 100644
index 0000000..713d39a
--- /dev/null
+++ b/sys/net/if_ppp.h
@@ -0,0 +1,137 @@
+/*
+ * if_ppp.h - Point-to-Point Protocol definitions.
+ *
+ * Copyright (c) 1989 Carnegie Mellon University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by Carnegie Mellon University. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_PPP_H_
+#define _IF_PPP_H_
+
+/* XXX this used to be self-contained. */
+#include <net/ppp_defs.h>
+#include <net/if.h>
+
+/*
+ * Packet sizes
+ */
+#define PPP_MTU 1500 /* Default MTU (size of Info field) */
+#define PPP_MAXMRU 65000 /* Largest MRU we allow */
+#define PPP_MAXMTU 16384 /* Largest MTU we allow */
+
+/*
+ * Bit definitions for flags.
+ */
+#define SC_COMP_PROT 0x00000001 /* protocol compression (output) */
+#define SC_COMP_AC 0x00000002 /* header compression (output) */
+#define SC_COMP_TCP 0x00000004 /* TCP (VJ) compression (output) */
+#define SC_NO_TCP_CCID 0x00000008 /* disable VJ connection-id comp. */
+#define SC_REJ_COMP_AC 0x00000010 /* reject adrs/ctrl comp. on input */
+#define SC_REJ_COMP_TCP 0x00000020 /* reject TCP (VJ) comp. on input */
+#define SC_CCP_OPEN 0x00000040 /* Look at CCP packets */
+#define SC_CCP_UP 0x00000080 /* May send/recv compressed packets */
+#define SC_DEBUG 0x00010000 /* enable debug messages */
+#define SC_LOG_INPKT 0x00020000 /* log contents of good pkts recvd */
+#define SC_LOG_OUTPKT 0x00040000 /* log contents of pkts sent */
+#define SC_LOG_RAWIN 0x00080000 /* log all chars received */
+#define SC_LOG_FLUSH 0x00100000 /* log all chars flushed */
+#define SC_RCV_B7_0 0x01000000 /* have rcvd char with bit 7 = 0 */
+#define SC_RCV_B7_1 0x02000000 /* have rcvd char with bit 7 = 1 */
+#define SC_RCV_EVNP 0x04000000 /* have rcvd char with even parity */
+#define SC_RCV_ODDP 0x08000000 /* have rcvd char with odd parity */
+#define SC_MASK 0x0fff00ff /* bits that user can change */
+
+/*
+ * State bits in sc_flags, not changeable by user.
+ */
+#define SC_TIMEOUT 0x00000400 /* timeout is currently pending */
+#define SC_VJ_RESET 0x00000800 /* need to reset VJ decomp */
+#define SC_COMP_RUN 0x00001000 /* compressor has been initiated */
+#define SC_DECOMP_RUN 0x00002000 /* decompressor has been initiated */
+#define SC_DC_ERROR 0x00004000 /* non-fatal decomp error detected */
+#define SC_DC_FERROR 0x00008000 /* fatal decomp error detected */
+#define SC_TBUSY 0x10000000 /* xmitter doesn't need a packet yet */
+#define SC_PKTLOST 0x20000000 /* have lost or dropped a packet */
+#define SC_FLUSH 0x40000000 /* flush input until next PPP_FLAG */
+#define SC_ESCAPED 0x80000000 /* saw a PPP_ESCAPE */
+
+/*
+ * Ioctl definitions.
+ */
+
+struct npioctl {
+    int	protocol;	/* PPP protocol, e.g. PPP_IP */
+ enum NPmode mode;
+};
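+
+/*
+ * A minimal sketch of how a user-level daemon might use this with the
+ * PPPIOC[GS]NPMODE ioctls defined below, once the link reaches the
+ * network phase (ppp_fd is a hypothetical descriptor for the ppp tty):
+ *
+ *	struct npioctl npi;
+ *	npi.protocol = PPP_IP;
+ *	npi.mode = NPMODE_PASS;
+ *	ioctl(ppp_fd, PPPIOCSNPMODE, &npi);
+ */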
+
+/* Structure describing a CCP configuration option, for PPPIOCSCOMPRESS */
+struct ppp_option_data {
+ u_char *ptr;
+ u_int length;
+ int transmit;
+};
+
+struct ifpppstatsreq {
+ char ifr_name[IFNAMSIZ];
+ struct ppp_stats stats;
+};
+
+struct ifpppcstatsreq {
+ char ifr_name[IFNAMSIZ];
+ struct ppp_comp_stats stats;
+};
+
+/*
+ * Ioctl definitions.
+ */
+
+#define PPPIOCGFLAGS _IOR('t', 90, int) /* get configuration flags */
+#define PPPIOCSFLAGS _IOW('t', 89, int) /* set configuration flags */
+#define PPPIOCGASYNCMAP _IOR('t', 88, int) /* get async map */
+#define PPPIOCSASYNCMAP _IOW('t', 87, int) /* set async map */
+#define PPPIOCGUNIT _IOR('t', 86, int) /* get ppp unit number */
+#define PPPIOCGRASYNCMAP _IOR('t', 85, int) /* get receive async map */
+#define PPPIOCSRASYNCMAP _IOW('t', 84, int) /* set receive async map */
+#define PPPIOCGMRU _IOR('t', 83, int) /* get max receive unit */
+#define PPPIOCSMRU _IOW('t', 82, int) /* set max receive unit */
+#define PPPIOCSMAXCID _IOW('t', 81, int) /* set VJ max slot ID */
+#define PPPIOCGXASYNCMAP _IOR('t', 80, ext_accm) /* get extended ACCM */
+#define PPPIOCSXASYNCMAP _IOW('t', 79, ext_accm) /* set extended ACCM */
+#define PPPIOCXFERUNIT _IO('t', 78) /* transfer PPP unit */
+#define PPPIOCSCOMPRESS _IOW('t', 77, struct ppp_option_data)
+#define PPPIOCGNPMODE _IOWR('t', 76, struct npioctl) /* get NP mode */
+#define PPPIOCSNPMODE _IOW('t', 75, struct npioctl) /* set NP mode */
+#define PPPIOCGIDLE _IOR('t', 74, struct ppp_idle) /* get idle time */
+#define PPPIOCSPASS _IOW('t', 71, struct bpf_program) /* set pass filter */
+#define PPPIOCSACTIVE _IOW('t', 70, struct bpf_program) /* set active filt */
+
+/* PPPIOC[GS]MTU are alternatives to SIOC[GS]IFMTU, used under Ultrix */
+#define PPPIOCGMTU _IOR('t', 73, int) /* get interface MTU */
+#define PPPIOCSMTU _IOW('t', 72, int) /* set interface MTU */
+
+/*
+ * These two are interface ioctls so that pppstats can do them on
+ * a socket without having to open the serial device.
+ */
+#define SIOCGPPPSTATS _IOWR('i', 123, struct ifpppstatsreq)
+#define SIOCGPPPCSTATS _IOWR('i', 122, struct ifpppcstatsreq)
+
+#if !defined(ifr_mtu)
+#define ifr_mtu ifr_ifru.ifru_metric
+#endif
+
+#endif /* _IF_PPP_H_ */
diff --git a/sys/net/if_pppvar.h b/sys/net/if_pppvar.h
new file mode 100644
index 0000000..e4b4fd7
--- /dev/null
+++ b/sys/net/if_pppvar.h
@@ -0,0 +1,111 @@
+/*
+ * if_pppvar.h - private structures and declarations for PPP.
+ *
+ * Copyright (c) 1994 The Australian National University.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies. This software is provided without any
+ * warranty, express or implied. The Australian National University
+ * makes no representations about the suitability of this software for
+ * any purpose.
+ *
+ * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
+ * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
+ * OR MODIFICATIONS.
+ *
+ * Copyright (c) 1989 Carnegie Mellon University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by Carnegie Mellon University. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Supported network protocols. These values are used for
+ * indexing sc_npmode.
+ */
+#define NP_IP 0 /* Internet Protocol */
+#define NUM_NP 1 /* Number of NPs. */
+
+/*
+ * Structure describing each ppp unit.
+ */
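+/*
+ * The "hi" and "net" tags on the fields below mark the interrupt
+ * priority level at which each field is mainly touched: device level
+ * (splimp/spltty) versus spl[soft]net (cf. the spl usage in if_ppp.c).
+ */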
+struct ppp_softc {
+ struct ifnet sc_if; /* network-visible interface */
+/*hi*/ u_int sc_flags; /* control/status bits; see if_ppp.h */
+ struct callout_handle sc_ch; /* Used for scheduling timeouts */
+ void *sc_devp; /* pointer to device-dep structure */
+ void (*sc_start) __P((struct ppp_softc *)); /* start output proc */
+ void (*sc_ctlp) __P((struct ppp_softc *)); /* rcvd control pkt */
+ void (*sc_relinq) __P((struct ppp_softc *)); /* relinquish ifunit */
+ void (*sc_setmtu) __P((struct ppp_softc *)); /* set mtu */
+ short sc_mru; /* max receive unit */
+ pid_t sc_xfer; /* used in transferring unit */
+/*hi*/ struct ifqueue sc_rawq; /* received packets */
+/*net*/ struct ifqueue sc_inq; /* queue of input packets for daemon */
+/*net*/ struct ifqueue sc_fastq; /* interactive output packet q */
+ struct mbuf *sc_npqueue; /* output packets not to be sent yet */
+ struct mbuf **sc_npqtail; /* ptr to last next ptr in npqueue */
+ struct pppstat sc_stats; /* count of bytes/pkts sent/rcvd */
+ enum NPmode sc_npmode[NUM_NP]; /* what to do with each NP */
+ struct compressor *sc_xcomp; /* transmit compressor */
+ void *sc_xc_state; /* transmit compressor state */
+ struct compressor *sc_rcomp; /* receive decompressor */
+ void *sc_rc_state; /* receive decompressor state */
+ time_t sc_last_sent; /* time (secs) last NP pkt sent */
+ time_t sc_last_recv; /* time (secs) last NP pkt rcvd */
+#ifdef PPP_FILTER
+ struct bpf_program sc_pass_filt; /* filter for packets to pass */
+ struct bpf_program sc_active_filt; /* filter for "non-idle" packets */
+#endif /* PPP_FILTER */
+#ifdef VJC
+ struct slcompress *sc_comp; /* vjc control buffer */
+#endif
+
+ /* Device-dependent part for async lines. */
+ ext_accm sc_asyncmap; /* async control character map */
+ u_long sc_rasyncmap; /* receive async control char map */
+ struct mbuf *sc_outm; /* mbuf chain currently being output */
+ struct mbuf *sc_m; /* pointer to input mbuf chain */
+ struct mbuf *sc_mc; /* pointer to current input mbuf */
+ char *sc_mp; /* ptr to next char in input mbuf */
+ short sc_ilen; /* length of input packet so far */
+ u_short sc_fcs; /* FCS so far (input) */
+ u_short sc_outfcs; /* FCS so far for output packet */
+ u_char sc_rawin[16]; /* chars as received */
+ int sc_rawin_count; /* # in sc_rawin */
+};
+
+extern struct ppp_softc ppp_softc[NPPP];
+
+struct ppp_softc *pppalloc __P((pid_t pid));
+void pppdealloc __P((struct ppp_softc *sc));
+int pppioctl __P((struct ppp_softc *sc, u_long cmd, caddr_t data,
+ int flag, struct proc *p));
+int pppoutput __P((struct ifnet *ifp, struct mbuf *m0,
+ struct sockaddr *dst, struct rtentry *rtp));
+void ppp_restart __P((struct ppp_softc *sc));
+void ppppktin __P((struct ppp_softc *sc, struct mbuf *m, int lost));
+struct mbuf *ppp_dequeue __P((struct ppp_softc *sc));
diff --git a/sys/net/if_sl.c b/sys/net/if_sl.c
new file mode 100644
index 0000000..456f827
--- /dev/null
+++ b/sys/net/if_sl.c
@@ -0,0 +1,1040 @@
+/*
+ * Copyright (c) 1987, 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_sl.c 8.6 (Berkeley) 2/1/94
+ * $FreeBSD$
+ */
+
+/*
+ * Serial Line interface
+ *
+ * Rick Adams
+ * Center for Seismic Studies
+ * 1300 N 17th Street, Suite 1450
+ * Arlington, Virginia 22209
+ * (703)276-7900
+ * rick@seismo.ARPA
+ * seismo!rick
+ *
+ * Pounded on heavily by Chris Torek (chris@mimsy.umd.edu, umcp-cs!chris).
+ * N.B.: this belongs in netinet, not net, the way it stands now.
+ * Should have a link-layer type designation, but wouldn't be
+ * backwards-compatible.
+ *
+ * Converted to 4.3BSD Beta by Chris Torek.
+ * Other changes made at Berkeley, based in part on code by Kirk Smith.
+ * W. Jolitz added slip abort.
+ *
+ * Hacked almost beyond recognition by Van Jacobson (van@helios.ee.lbl.gov).
+ * Added priority queuing for "interactive" traffic; hooks for TCP
+ * header compression; ICMP filtering (at 2400 baud, some cretin
+ * pinging you can use up all your bandwidth). Made low clist behavior
+ * more robust and slightly less likely to hang serial line.
+ * Sped up a bunch of things.
+ *
+ * Note that splimp() is used throughout to block both (tty) input
+ * interrupts and network activity; thus, splimp must be >= spltty.
+ */
+
+#include "sl.h"
+#if NSL > 0
+
+#include "opt_inet.h"
+#if !defined(ACTUALLY_LKM_NOT_KERNEL) && !defined(KLD_MODULE)
+#include "opt_slip.h"
+#endif
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/dkstat.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/fcntl.h>
+#include <sys/signalvar.h>
+#include <sys/tty.h>
+#include <sys/clist.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+
+#if INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#else
+#error "Huh? Slip without inet?"
+#endif
+
+#include <net/slcompress.h>
+#include <net/if_slvar.h>
+#include <net/slip.h>
+
+#include <net/bpf.h>
+
+static void slattach __P((void *));
+PSEUDO_SET(slattach, if_sl);
+
+/*
+ * SLRMAX is a hard limit on input packet size. To simplify the code
+ * and improve performance, we require that packets fit in an mbuf
+ * cluster, and if we get a compressed packet, there's enough extra
+ * room to expand the header into a max length tcp/ip header (128
+ * bytes). So, SLRMAX can be at most
+ * MCLBYTES - 128
+ *
+ * SLMTU is the default transmit MTU. The transmit MTU should be kept
+ * small enough so that interactive use doesn't suffer, but large
+ * enough to provide good performance. 552 is a good choice for SLMTU
+ * because it is high enough to not fragment TCP packets being routed
+ * through this host. Packet fragmentation is bad with SLIP because
+ * fragment headers aren't compressed. The previous assumptions about
+ * the best MTU value don't really hold when using modern modems with
+ * BTLZ data compression because the modem buffers play a much larger
+ * role in interactive performance than the MTU. The MTU can be changed
+ * at any time to suit the specific environment with ifconfig(8), and
+ * its maximum value is defined as SLTMAX. SLTMAX must not be so large
+ * that it would overflow the stack if BPF is configured (XXX; if_ppp.c
+ * handles this better).
+ *
+ * SLIP_HIWAT is the amount of data that will be queued 'downstream'
+ * of us (i.e., in clists waiting to be picked up by the tty output
+ * interrupt). If we queue a lot of data downstream, it's immune to
+ * our t.o.s. queuing.
+ * E.g., if SLIP_HIWAT is 1024, the interactive traffic in mixed
+ * telnet/ftp will see a 1 sec wait, independent of the mtu (the
+ * wait is dependent on the ftp window size but that's typically
+ * 1k - 4k). So, we want SLIP_HIWAT just big enough to amortize
+ * the cost (in idle time on the wire) of the tty driver running
+ * off the end of its clists & having to call back slstart for a
+ * new packet. For a tty interface with any buffering at all, this
+ * cost will be zero. Even with a totally brain dead interface (like
+ * the one on a typical workstation), the cost will be <= 1 character
+ * time. So, setting SLIP_HIWAT to ~100 guarantees that we'll lose
+ * at most 1% while maintaining good interactive response.
+ */
+#define BUFOFFSET (128+sizeof(struct ifnet **)+SLIP_HDRLEN)
+#define SLRMAX (MCLBYTES - BUFOFFSET)
+#define SLBUFSIZE (SLRMAX + BUFOFFSET)
+#ifndef SLMTU
+#define SLMTU 552 /* default MTU */
+#endif
+#define SLTMAX 1500 /* maximum MTU */
+#define SLIP_HIWAT roundup(50,CBSIZE)
+#define CLISTRESERVE 1024 /* Can't let clists get too low */
+
+/*
+ * SLIP ABORT ESCAPE MECHANISM:
+ * (inspired by HAYES modem escape arrangement)
+ * 1sec escape 1sec escape 1sec escape { 1sec escape 1sec escape }
+ * within window time signals a "soft" exit from slip mode by remote end
+ * if the IFF_DEBUG flag is on.
+ */
+#define ABT_ESC '\033' /* can't be t_intr - distant host must know it*/
+#define ABT_IDLE 1 /* in seconds - idle before an escape */
+#define ABT_COUNT 3 /* count of escapes for abort */
+#define ABT_WINDOW (ABT_COUNT*2+2) /* in seconds - time to count */
+
+static struct sl_softc sl_softc[NSL];
+
+#define FRAME_END 0xc0 /* Frame End */
+#define FRAME_ESCAPE 0xdb /* Frame Esc */
+#define TRANS_FRAME_END 0xdc /* transposed frame end */
+#define TRANS_FRAME_ESCAPE 0xdd /* transposed frame esc */
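+
+/*
+ * SLIP framing (cf. RFC 1055): packets are delimited by FRAME_END; a
+ * literal FRAME_END in the data is sent as FRAME_ESCAPE TRANS_FRAME_END,
+ * and a literal FRAME_ESCAPE as FRAME_ESCAPE TRANS_FRAME_ESCAPE.
+ */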
+
+static int slinit __P((struct sl_softc *));
+static struct mbuf *sl_btom __P((struct sl_softc *, int));
+static timeout_t sl_keepalive;
+static timeout_t sl_outfill;
+static int slclose __P((struct tty *,int));
+static int slinput __P((int, struct tty *));
+static int slioctl __P((struct ifnet *, u_long, caddr_t));
+static int sltioctl __P((struct tty *, u_long, caddr_t, int, struct proc *));
+static int slopen __P((dev_t, struct tty *));
+static int sloutput __P((struct ifnet *,
+ struct mbuf *, struct sockaddr *, struct rtentry *));
+static int slstart __P((struct tty *));
+
+static struct linesw slipdisc = {
+ slopen, slclose, l_noread, l_nowrite,
+ sltioctl, slinput, slstart, ttymodem,
+ FRAME_END
+};
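+
+/*
+ * The discipline is entered when a user process (e.g. slattach(8))
+ * selects SLIPDISC on a tty with the TIOCSETD ioctl; slopen() then
+ * binds the tty to a free sl unit.
+ */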
+
+/*
+ * Called from boot code to establish sl interfaces.
+ */
+static void
+slattach(dummy)
+ void *dummy;
+{
+ register struct sl_softc *sc;
+ register int i = 0;
+
+ linesw[SLIPDISC] = slipdisc;
+
+ for (sc = sl_softc; i < NSL; sc++) {
+ sc->sc_if.if_name = "sl";
+ sc->sc_if.if_unit = i++;
+ sc->sc_if.if_mtu = SLMTU;
+ sc->sc_if.if_flags =
+#ifdef SLIP_IFF_OPTS
+ SLIP_IFF_OPTS;
+#else
+ IFF_POINTOPOINT | SC_AUTOCOMP | IFF_MULTICAST;
+#endif
+ sc->sc_if.if_type = IFT_SLIP;
+ sc->sc_if.if_ioctl = slioctl;
+ sc->sc_if.if_output = sloutput;
+ sc->sc_if.if_snd.ifq_maxlen = 50;
+ sc->sc_fastq.ifq_maxlen = 32;
+ sc->sc_if.if_linkmib = sc;
+ sc->sc_if.if_linkmiblen = sizeof *sc;
+ if_attach(&sc->sc_if);
+ bpfattach(&sc->sc_if, DLT_SLIP, SLIP_HDRLEN);
+ }
+}
+
+static int
+slinit(sc)
+ register struct sl_softc *sc;
+{
+ register caddr_t p;
+
+ if (sc->sc_ep == (u_char *) 0) {
+ MCLALLOC(p, M_WAIT);
+ if (p)
+ sc->sc_ep = (u_char *)p + SLBUFSIZE;
+ else {
+ printf("sl%ld: can't allocate buffer\n",
+ (long)(sc - sl_softc));
+ return (0);
+ }
+ }
+ sc->sc_buf = sc->sc_ep - SLRMAX;
+ sc->sc_mp = sc->sc_buf;
+ sl_compress_init(&sc->sc_comp, -1);
+ return (1);
+}
+
+/*
+ * Line specific open routine.
+ * Attach the given tty to the first available sl unit.
+ */
+/* ARGSUSED */
+static int
+slopen(dev, tp)
+ dev_t dev;
+ register struct tty *tp;
+{
+ struct proc *p = curproc; /* XXX */
+ register struct sl_softc *sc;
+ register int nsl;
+ int s, error;
+
+ error = suser(p);
+ if (error)
+ return (error);
+
+ if (tp->t_line == SLIPDISC)
+ return (0);
+
+ for (nsl = NSL, sc = sl_softc; --nsl >= 0; sc++)
+ if (sc->sc_ttyp == NULL && !(sc->sc_flags & SC_STATIC)) {
+ if (slinit(sc) == 0)
+ return (ENOBUFS);
+ tp->t_sc = (caddr_t)sc;
+ sc->sc_ttyp = tp;
+ sc->sc_if.if_baudrate = tp->t_ospeed;
+ ttyflush(tp, FREAD | FWRITE);
+
+ tp->t_line = SLIPDISC;
+ /*
+ * We don't use t_canq or t_rawq, so reduce their
+ * cblock resources to 0. Reserve enough cblocks
+ * for t_outq to guarantee that we can fit a full
+ * packet if the SLIP_HIWAT check allows slstart()
+ * to loop. Use the same value for the cblock
+ * limit since the reserved blocks should always
+ * be enough. Reserving cblocks probably makes
+ * the CLISTRESERVE check unnecessary and wasteful.
+ */
+ clist_alloc_cblocks(&tp->t_canq, 0, 0);
+ clist_alloc_cblocks(&tp->t_outq,
+ SLIP_HIWAT + 2 * sc->sc_if.if_mtu + 1,
+ SLIP_HIWAT + 2 * sc->sc_if.if_mtu + 1);
+ clist_alloc_cblocks(&tp->t_rawq, 0, 0);
+
+ s = splnet();
+ if_up(&sc->sc_if);
+ splx(s);
+ return (0);
+ }
+ return (ENXIO);
+}
+
+/*
+ * Line specific close routine.
+ * Detach the tty from the sl unit.
+ */
+static int
+slclose(tp,flag)
+ struct tty *tp;
+ int flag;
+{
+ register struct sl_softc *sc;
+ int s;
+
+ ttyflush(tp, FREAD | FWRITE);
+ /*
+ * XXX the placement of the following spl is misleading. tty
+ * interrupts must be blocked across line discipline switches
+ * and throughout closes to avoid races.
+ */
+ s = splimp(); /* actually, max(spltty, splnet) */
+ clist_free_cblocks(&tp->t_outq);
+ tp->t_line = 0;
+ sc = (struct sl_softc *)tp->t_sc;
+ if (sc != NULL) {
+ if (sc->sc_outfill) {
+ sc->sc_outfill = 0;
+ untimeout(sl_outfill, sc, sc->sc_ofhandle);
+ }
+ if (sc->sc_keepalive) {
+ sc->sc_keepalive = 0;
+ untimeout(sl_keepalive, sc, sc->sc_kahandle);
+ }
+ if_down(&sc->sc_if);
+ sc->sc_flags &= SC_STATIC;
+ sc->sc_ttyp = NULL;
+ tp->t_sc = NULL;
+ MCLFREE((caddr_t)(sc->sc_ep - SLBUFSIZE));
+ sc->sc_ep = 0;
+ sc->sc_mp = 0;
+ sc->sc_buf = 0;
+ }
+ splx(s);
+ return 0;
+}
+
+/*
+ * Line specific (tty) ioctl routine.
+ * Provide a way to get the sl unit number.
+ */
+/* ARGSUSED */
+static int
+sltioctl(tp, cmd, data, flag, p)
+ struct tty *tp;
+ u_long cmd;
+ caddr_t data;
+ int flag;
+ struct proc *p;
+{
+ struct sl_softc *sc = (struct sl_softc *)tp->t_sc, *nc, *tmpnc;
+ int s, nsl;
+
+ s = splimp();
+ switch (cmd) {
+ case SLIOCGUNIT:
+ *(int *)data = sc->sc_if.if_unit;
+ break;
+
+ case SLIOCSUNIT:
+ if (sc->sc_if.if_unit != *(u_int *)data) {
+ for (nsl = NSL, nc = sl_softc; --nsl >= 0; nc++) {
+ if ( nc->sc_if.if_unit == *(u_int *)data
+ && nc->sc_ttyp == NULL
+ ) {
+ tmpnc = malloc(sizeof *tmpnc, M_TEMP,
+ M_NOWAIT);
+ if (tmpnc == NULL) {
+ splx(s);
+ return (ENOMEM);
+ }
+ *tmpnc = *nc;
+ *nc = *sc;
+ nc->sc_if = tmpnc->sc_if;
+ tmpnc->sc_if = sc->sc_if;
+ *sc = *tmpnc;
+ free(tmpnc, M_TEMP);
+ if (sc->sc_if.if_flags & IFF_UP) {
+ if_down(&sc->sc_if);
+ if (!(nc->sc_if.if_flags & IFF_UP))
+ if_up(&nc->sc_if);
+ } else if (nc->sc_if.if_flags & IFF_UP)
+ if_down(&nc->sc_if);
+ sc->sc_flags &= ~SC_STATIC;
+ sc->sc_flags |= (nc->sc_flags & SC_STATIC);
+ tp->t_sc = sc = nc;
+ clist_alloc_cblocks(&tp->t_outq,
+ SLIP_HIWAT + 2 * sc->sc_if.if_mtu + 1,
+ SLIP_HIWAT + 2 * sc->sc_if.if_mtu + 1);
+ goto slfound;
+ }
+ }
+ splx(s);
+ return (ENXIO);
+ }
+ slfound:
+ sc->sc_flags |= SC_STATIC;
+ break;
+
+ case SLIOCSKEEPAL:
+ sc->sc_keepalive = *(u_int *)data * hz;
+ if (sc->sc_keepalive) {
+ sc->sc_flags |= SC_KEEPALIVE;
+ sc->sc_kahandle = timeout(sl_keepalive, sc,
+ sc->sc_keepalive);
+ } else {
+ if ((sc->sc_flags & SC_KEEPALIVE) != 0) {
+ untimeout(sl_keepalive, sc, sc->sc_kahandle);
+ sc->sc_flags &= ~SC_KEEPALIVE;
+ }
+ }
+ break;
+
+ case SLIOCGKEEPAL:
+ *(int *)data = sc->sc_keepalive / hz;
+ break;
+
+ case SLIOCSOUTFILL:
+ sc->sc_outfill = *(u_int *)data * hz;
+ if (sc->sc_outfill) {
+ sc->sc_flags |= SC_OUTWAIT;
+ sc->sc_ofhandle = timeout(sl_outfill, sc,
+ sc->sc_outfill);
+ } else {
+ if ((sc->sc_flags & SC_OUTWAIT) != 0) {
+ untimeout(sl_outfill, sc, sc->sc_ofhandle);
+ sc->sc_flags &= ~SC_OUTWAIT;
+ }
+ }
+ break;
+
+ case SLIOCGOUTFILL:
+ *(int *)data = sc->sc_outfill / hz;
+ break;
+
+ default:
+ splx(s);
+ return (ENOIOCTL);
+ }
+ splx(s);
+ return (0);
+}
+
+/*
+ * Queue a packet. Start transmission if not active.
+ * Compression happens in slstart; if we do it here, IP TOS
+ * will cause us to not compress "background" packets, because
+ * ordering gets trashed. It can be done for all packets in slstart.
+ */
+static int
+sloutput(ifp, m, dst, rtp)
+ struct ifnet *ifp;
+ register struct mbuf *m;
+ struct sockaddr *dst;
+ struct rtentry *rtp;
+{
+ register struct sl_softc *sc = &sl_softc[ifp->if_unit];
+ register struct ip *ip;
+ register struct ifqueue *ifq;
+ int s;
+
+ /*
+ * `Cannot happen' (see slioctl). Someday we will extend
+ * the line protocol to support other address families.
+ */
+ if (dst->sa_family != AF_INET) {
+ printf("sl%d: af%d not supported\n", sc->sc_if.if_unit,
+ dst->sa_family);
+ m_freem(m);
+ sc->sc_if.if_noproto++;
+ return (EAFNOSUPPORT);
+ }
+
+ if (sc->sc_ttyp == NULL || !(ifp->if_flags & IFF_UP)) {
+ m_freem(m);
+ return (ENETDOWN);
+ }
+ if ((sc->sc_ttyp->t_state & TS_CONNECTED) == 0) {
+ m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ ifq = &sc->sc_if.if_snd;
+ ip = mtod(m, struct ip *);
+ if (sc->sc_if.if_flags & SC_NOICMP && ip->ip_p == IPPROTO_ICMP) {
+ m_freem(m);
+ return (ENETRESET); /* XXX ? */
+ }
+ if (ip->ip_tos & IPTOS_LOWDELAY)
+ ifq = &sc->sc_fastq;
+ s = splimp();
+ if (IF_QFULL(ifq)) {
+ IF_DROP(ifq);
+ m_freem(m);
+ splx(s);
+ sc->sc_if.if_oerrors++;
+ return (ENOBUFS);
+ }
+ IF_ENQUEUE(ifq, m);
+ if (sc->sc_ttyp->t_outq.c_cc == 0)
+ slstart(sc->sc_ttyp);
+ splx(s);
+ return (0);
+}
+
+/*
+ * Start output on interface. Get another datagram
+ * to send from the interface queue and map it to
+ * the interface before starting output.
+ */
+static int
+slstart(tp)
+ register struct tty *tp;
+{
+ register struct sl_softc *sc = (struct sl_softc *)tp->t_sc;
+ register struct mbuf *m;
+ register u_char *cp;
+ register struct ip *ip;
+ int s;
+ struct mbuf *m2;
+ u_char bpfbuf[SLTMAX + SLIP_HDRLEN];
+ register int len = 0;
+
+ for (;;) {
+ /*
+ * Call output process whether or not there is more in the
+ * output queue. We are being called in lieu of ttstart
+ * and must do what it would.
+ */
+ (*tp->t_oproc)(tp);
+
+ if (tp->t_outq.c_cc != 0) {
+ if (sc != NULL)
+ sc->sc_flags &= ~SC_OUTWAIT;
+ if (tp->t_outq.c_cc > SLIP_HIWAT)
+ return 0;
+ }
+
+ /*
+ * This happens briefly when the line shuts down.
+ */
+ if (sc == NULL)
+ return 0;
+
+ /*
+ * Get a packet and send it to the interface.
+ */
+ s = splimp();
+ IF_DEQUEUE(&sc->sc_fastq, m);
+ if (m)
+ sc->sc_if.if_omcasts++; /* XXX */
+ else
+ IF_DEQUEUE(&sc->sc_if.if_snd, m);
+ splx(s);
+ if (m == NULL)
+ return 0;
+
+ /*
+ * We do the header compression here rather than in sloutput
+ * because the packets will be out of order if we are using TOS
+ * queueing, and the connection id compression will get
+ * munged when this happens.
+ */
+ if (sc->sc_if.if_bpf) {
+ /*
+ * We need to save the TCP/IP header before it's
+ * compressed. To avoid complicated code, we just
+ * copy the entire packet into a stack buffer (since
+ * this is a serial line, packets should be short
+ * and/or the copy should be negligible cost compared
+ * to the packet transmission time).
+ */
+ register struct mbuf *m1 = m;
+ register u_char *cp = bpfbuf + SLIP_HDRLEN;
+
+ len = 0;
+ do {
+ register int mlen = m1->m_len;
+
+ bcopy(mtod(m1, caddr_t), cp, mlen);
+ cp += mlen;
+ len += mlen;
+ } while ((m1 = m1->m_next) != NULL);
+ }
+ ip = mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION && ip->ip_p == IPPROTO_TCP) {
+ if (sc->sc_if.if_flags & SC_COMPRESS)
+ *mtod(m, u_char *) |= sl_compress_tcp(m, ip,
+ &sc->sc_comp, 1);
+ }
+ if (sc->sc_if.if_bpf) {
+ /*
+ * Put the SLIP pseudo-"link header" in place. The
+ * compressed header is now at the beginning of the
+ * mbuf.
+ */
+ bpfbuf[SLX_DIR] = SLIPDIR_OUT;
+ bcopy(mtod(m, caddr_t), &bpfbuf[SLX_CHDR], CHDR_LEN);
+ bpf_tap(&sc->sc_if, bpfbuf, len + SLIP_HDRLEN);
+ }
+
+ /*
+ * If system is getting low on clists, just flush our
+ * output queue (if the stuff was important, it'll get
+ * retransmitted). Note that SLTMAX is used instead of
+ * the current if_mtu setting because connections that
+ * have already been established still use the original
+ * (possibly larger) mss.
+ */
+ if (cfreecount < CLISTRESERVE + SLTMAX) {
+ m_freem(m);
+ sc->sc_if.if_collisions++;
+ continue;
+ }
+
+ sc->sc_flags &= ~SC_OUTWAIT;
+ /*
+ * The extra FRAME_END will start up a new packet, and thus
+ * will flush any accumulated garbage. We do this whenever
+ * the line may have been idle for some time.
+ */
+ if (tp->t_outq.c_cc == 0) {
+ ++sc->sc_if.if_obytes;
+ (void) putc(FRAME_END, &tp->t_outq);
+ }
+
+ while (m) {
+ register u_char *ep;
+
+ cp = mtod(m, u_char *); ep = cp + m->m_len;
+ while (cp < ep) {
+ /*
+ * Find out how many bytes in the string we can
+ * handle without doing something special.
+ */
+ register u_char *bp = cp;
+
+ while (cp < ep) {
+ switch (*cp++) {
+ case FRAME_ESCAPE:
+ case FRAME_END:
+ --cp;
+ goto out;
+ }
+ }
+ out:
+ if (cp > bp) {
+ /*
+ * Put n characters at once
+ * into the tty output queue.
+ */
+ if (b_to_q((char *)bp, cp - bp,
+ &tp->t_outq))
+ break;
+ sc->sc_if.if_obytes += cp - bp;
+ }
+ /*
+ * If there are characters left in the mbuf,
+ * the first one must be special..
+ * Put it out in a different form.
+ */
+ if (cp < ep) {
+ if (putc(FRAME_ESCAPE, &tp->t_outq))
+ break;
+ if (putc(*cp++ == FRAME_ESCAPE ?
+ TRANS_FRAME_ESCAPE : TRANS_FRAME_END,
+ &tp->t_outq)) {
+ (void) unputc(&tp->t_outq);
+ break;
+ }
+ sc->sc_if.if_obytes += 2;
+ }
+ }
+ MFREE(m, m2);
+ m = m2;
+ }
+
+ if (putc(FRAME_END, &tp->t_outq)) {
+ /*
+ * Not enough room. Remove a char to make room
+ * and end the packet normally.
+ * If you get many collisions (more than one or two
+ * a day) you probably do not have enough clists
+ * and you should increase "nclist" in param.c.
+ */
+ (void) unputc(&tp->t_outq);
+ (void) putc(FRAME_END, &tp->t_outq);
+ sc->sc_if.if_collisions++;
+ } else {
+ ++sc->sc_if.if_obytes;
+ sc->sc_if.if_opackets++;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Copy data buffer to mbuf chain; add ifnet pointer.
+ */
+static struct mbuf *
+sl_btom(sc, len)
+ register struct sl_softc *sc;
+ register int len;
+{
+ register struct mbuf *m;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (NULL);
+
+ /*
+ * If we have more than MHLEN bytes, it's cheaper to
+ * queue the cluster we just filled & allocate a new one
+ * for the input buffer. Otherwise, fill the mbuf we
+ * allocated above. Note that code in the input routine
+ * guarantees that packet will fit in a cluster.
+ */
+ if (len >= MHLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ /*
+ * we couldn't get a cluster - if memory's this
+ * low, it's time to start dropping packets.
+ */
+ (void) m_free(m);
+ return (NULL);
+ }
+ sc->sc_ep = mtod(m, u_char *) + SLBUFSIZE;
+ m->m_data = (caddr_t)sc->sc_buf;
+ m->m_ext.ext_buf = (caddr_t)((intptr_t)sc->sc_buf &~ MCLOFSET);
+ } else
+ bcopy((caddr_t)sc->sc_buf, mtod(m, caddr_t), len);
+
+ m->m_len = len;
+ m->m_pkthdr.len = len;
+ m->m_pkthdr.rcvif = &sc->sc_if;
+ return (m);
+}
+
+/*
+ * tty interface receiver interrupt.
+ */
+static int
+slinput(c, tp)
+ register int c;
+ register struct tty *tp;
+{
+ register struct sl_softc *sc;
+ register struct mbuf *m;
+ register int len;
+ int s;
+ u_char chdr[CHDR_LEN];
+
+ tk_nin++;
+ sc = (struct sl_softc *)tp->t_sc;
+ if (sc == NULL)
+ return 0;
+ if (c & TTY_ERRORMASK || (tp->t_state & TS_CONNECTED) == 0) {
+ sc->sc_flags |= SC_ERROR;
+ return 0;
+ }
+ c &= TTY_CHARMASK;
+
+ ++sc->sc_if.if_ibytes;
+
+ if (sc->sc_if.if_flags & IFF_DEBUG) {
+ if (c == ABT_ESC) {
+ /*
+ * If we have a previous abort, see whether
+ * this one is within the time limit.
+ */
+ if (sc->sc_abortcount &&
+ time_second >= sc->sc_starttime + ABT_WINDOW)
+ sc->sc_abortcount = 0;
+ /*
+ * If we see an abort after "idle" time, count it;
+ * record when the first abort escape arrived.
+ */
+ if (time_second >= sc->sc_lasttime + ABT_IDLE) {
+ if (++sc->sc_abortcount == 1)
+ sc->sc_starttime = time_second;
+ if (sc->sc_abortcount >= ABT_COUNT) {
+ slclose(tp,0);
+ return 0;
+ }
+ }
+ } else
+ sc->sc_abortcount = 0;
+ sc->sc_lasttime = time_second;
+ }
+
+ switch (c) {
+
+ case TRANS_FRAME_ESCAPE:
+ if (sc->sc_escape)
+ c = FRAME_ESCAPE;
+ break;
+
+ case TRANS_FRAME_END:
+ if (sc->sc_escape)
+ c = FRAME_END;
+ break;
+
+ case FRAME_ESCAPE:
+ sc->sc_escape = 1;
+ return 0;
+
+ case FRAME_END:
+ sc->sc_flags &= ~SC_KEEPALIVE;
+ if(sc->sc_flags & SC_ERROR) {
+ sc->sc_flags &= ~SC_ERROR;
+ goto newpack;
+ }
+ len = sc->sc_mp - sc->sc_buf;
+ if (len < 3)
+ /* less than min length packet - ignore */
+ goto newpack;
+
+ if (sc->sc_if.if_bpf) {
+ /*
+ * Save the compressed header, so we
+ * can tack it on later. Note that we
+ * will end up copying garbage in some
+ * cases but this is okay. We remember
+ * where the buffer started so we can
+ * compute the new header length.
+ */
+ bcopy(sc->sc_buf, chdr, CHDR_LEN);
+ }
+
+ if ((c = (*sc->sc_buf & 0xf0)) != (IPVERSION << 4)) {
+ if (c & 0x80)
+ c = TYPE_COMPRESSED_TCP;
+ else if (c == TYPE_UNCOMPRESSED_TCP)
+ *sc->sc_buf &= 0x4f; /* XXX */
+ /*
+ * We've got something that's not an IP packet.
+ * If compression is enabled, try to decompress it.
+ * Otherwise, if `auto-enable' compression is on and
+ * it's a reasonable packet, decompress it and then
+ * enable compression. Otherwise, drop it.
+ */
+ if (sc->sc_if.if_flags & SC_COMPRESS) {
+ len = sl_uncompress_tcp(&sc->sc_buf, len,
+ (u_int)c, &sc->sc_comp);
+ if (len <= 0)
+ goto error;
+ } else if ((sc->sc_if.if_flags & SC_AUTOCOMP) &&
+ c == TYPE_UNCOMPRESSED_TCP && len >= 40) {
+ len = sl_uncompress_tcp(&sc->sc_buf, len,
+ (u_int)c, &sc->sc_comp);
+ if (len <= 0)
+ goto error;
+ sc->sc_if.if_flags |= SC_COMPRESS;
+ } else
+ goto error;
+ }
+ if (sc->sc_if.if_bpf) {
+ /*
+ * Put the SLIP pseudo-"link header" in place.
+ * We couldn't do this any earlier since
+ * decompression probably moved the buffer
+ * pointer. Then, invoke BPF.
+ */
+ register u_char *hp = sc->sc_buf - SLIP_HDRLEN;
+
+ hp[SLX_DIR] = SLIPDIR_IN;
+ bcopy(chdr, &hp[SLX_CHDR], CHDR_LEN);
+ bpf_tap(&sc->sc_if, hp, len + SLIP_HDRLEN);
+ }
+ m = sl_btom(sc, len);
+ if (m == NULL)
+ goto error;
+
+ sc->sc_if.if_ipackets++;
+
+ if ((sc->sc_if.if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ goto newpack;
+ }
+
+ s = splimp();
+ if (IF_QFULL(&ipintrq)) {
+ IF_DROP(&ipintrq);
+ sc->sc_if.if_ierrors++;
+ sc->sc_if.if_iqdrops++;
+ m_freem(m);
+ } else {
+ IF_ENQUEUE(&ipintrq, m);
+ schednetisr(NETISR_IP);
+ }
+ splx(s);
+ goto newpack;
+ }
+ if (sc->sc_mp < sc->sc_ep) {
+ *sc->sc_mp++ = c;
+ sc->sc_escape = 0;
+ return 0;
+ }
+
+ /* can't put lower; would miss an extra frame */
+ sc->sc_flags |= SC_ERROR;
+
+error:
+ sc->sc_if.if_ierrors++;
+newpack:
+ sc->sc_mp = sc->sc_buf = sc->sc_ep - SLRMAX;
+ sc->sc_escape = 0;
+ return 0;
+}
+
+/*
+ * Process an ioctl request.
+ */
+static int
+slioctl(ifp, cmd, data)
+ register struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ register struct ifaddr *ifa = (struct ifaddr *)data;
+ register struct ifreq *ifr = (struct ifreq *)data;
+ register int s, error = 0;
+
+ s = splimp();
+
+ switch (cmd) {
+
+ case SIOCSIFFLAGS:
+ /*
+ * if.c will set the interface up even if we
+ * don't want it to.
+ */
+ if (sl_softc[ifp->if_unit].sc_ttyp == NULL) {
+ ifp->if_flags &= ~IFF_UP;
+ }
+ break;
+ case SIOCSIFADDR:
+ /*
+ * This is "historical" - set the interface up when
+ * setting the address.
+ */
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ if (sl_softc[ifp->if_unit].sc_ttyp != NULL)
+ ifp->if_flags |= IFF_UP;
+ } else {
+ error = EAFNOSUPPORT;
+ }
+ break;
+
+ case SIOCSIFDSTADDR:
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ error = EAFNOSUPPORT;
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ if (ifr->ifr_mtu > SLTMAX)
+ error = EINVAL;
+ else {
+ struct tty *tp;
+
+ ifp->if_mtu = ifr->ifr_mtu;
+ tp = sl_softc[ifp->if_unit].sc_ttyp;
+ if (tp != NULL)
+ clist_alloc_cblocks(&tp->t_outq,
+ SLIP_HIWAT + 2 * ifp->if_mtu + 1,
+ SLIP_HIWAT + 2 * ifp->if_mtu + 1);
+ }
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ splx(s);
+ return (error);
+}
+
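+/*
+ * Keepalive timer: slinput() clears SC_KEEPALIVE whenever a FRAME_END
+ * arrives, so if the flag is still set at the next tick no complete
+ * frame was received during the interval and SIGURG is sent to the
+ * tty's process group to flag a dead line.
+ */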
+static void
+sl_keepalive(chan)
+ void *chan;
+{
+ struct sl_softc *sc = chan;
+
+ if (sc->sc_keepalive) {
+ if (sc->sc_flags & SC_KEEPALIVE)
+ pgsignal (sc->sc_ttyp->t_pgrp, SIGURG, 1);
+ else
+ sc->sc_flags |= SC_KEEPALIVE;
+ sc->sc_kahandle = timeout(sl_keepalive, sc, sc->sc_keepalive);
+ } else {
+ sc->sc_flags &= ~SC_KEEPALIVE;
+ }
+}
+
+static void
+sl_outfill(chan)
+ void *chan;
+{
+ struct sl_softc *sc = chan;
+ register struct tty *tp = sc->sc_ttyp;
+ int s;
+
+ if (sc->sc_outfill && tp != NULL) {
+ if (sc->sc_flags & SC_OUTWAIT) {
+ s = splimp ();
+ ++sc->sc_if.if_obytes;
+ (void) putc(FRAME_END, &tp->t_outq);
+ (*tp->t_oproc)(tp);
+ splx (s);
+ } else
+ sc->sc_flags |= SC_OUTWAIT;
+ sc->sc_ofhandle = timeout(sl_outfill, sc, sc->sc_outfill);
+ } else {
+ sc->sc_flags &= ~SC_OUTWAIT;
+ }
+}
+
+#endif
diff --git a/sys/net/if_slvar.h b/sys/net/if_slvar.h
new file mode 100644
index 0000000..06ac4d9
--- /dev/null
+++ b/sys/net/if_slvar.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_slvar.h 8.3 (Berkeley) 2/1/94
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_SLVAR_H_
+#define _NET_IF_SLVAR_H_
+
+#include <sys/callout.h>
+
+/*
+ * Definitions for SLIP interface data structures
+ *
+ * (This exists so programs like slstats can get at the definition
+ * of sl_softc.)
+ */
+struct sl_softc {
+ struct ifnet sc_if; /* network-visible interface */
+ struct ifqueue sc_fastq; /* interactive output queue */
+ struct tty *sc_ttyp; /* pointer to tty structure */
+ u_char *sc_mp; /* pointer to next available buf char */
+ u_char *sc_ep; /* pointer to last available buf char */
+ u_char *sc_buf; /* input buffer */
+ u_int sc_flags; /* see below */
+ u_int sc_escape; /* =1 if last char input was FRAME_ESCAPE */
+ long sc_lasttime; /* last time a char arrived */
+ long sc_abortcount; /* number of abort escape chars */
+ long sc_starttime; /* time of first abort in window */
+ u_int sc_keepalive; /* time to decide link hang */
+ u_int sc_outfill; /* time to send FRAME_END when output idle */
+ /*
+ * Handles for scheduling outfill and
+ * keepalive timeouts.
+ */
+ struct callout_handle sc_ofhandle;
+ struct callout_handle sc_kahandle;
+ struct slcompress sc_comp; /* tcp compression data */
+};
+
+/* internal flags */
+#define SC_ERROR 0x0001 /* had an input error */
+#define SC_OUTWAIT 0x0002 /* waiting for output fill */
+#define SC_KEEPALIVE 0x0004 /* input keepalive */
+#define SC_STATIC	0x0008		/* it is a static unit */
+
+/* visible flags */
+#define SC_COMPRESS IFF_LINK0 /* compress TCP traffic */
+#define SC_NOICMP IFF_LINK1 /* suppress ICMP traffic */
+#define SC_AUTOCOMP IFF_LINK2 /* auto-enable TCP compression */
+
+
+#endif
diff --git a/sys/net/if_sppp.h b/sys/net/if_sppp.h
new file mode 100644
index 0000000..5d47287
--- /dev/null
+++ b/sys/net/if_sppp.h
@@ -0,0 +1,175 @@
+/*
+ * Defines for synchronous PPP/Cisco link level subroutines.
+ *
+ * Copyright (C) 1994 Cronyx Ltd.
+ * Author: Serge Vakulenko, <vak@cronyx.ru>
+ *
+ * Heavily revamped to conform to RFC 1661.
+ * Copyright (C) 1997, Joerg Wunsch.
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organizations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * From: Version 2.0, Fri Oct 6 20:39:21 MSK 1995
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_SPPP_H_
+#define _NET_IF_SPPP_H_ 1
+
+#define IDX_LCP 0 /* idx into state table */
+
+struct slcp {
+ u_long opts; /* LCP options to send (bitfield) */
+ u_long magic; /* local magic number */
+ u_long mru; /* our max receive unit */
+ u_long their_mru; /* their max receive unit */
+ u_long protos; /* bitmask of protos that are started */
+ u_char echoid; /* id of last keepalive echo request */
+ /* restart max values, see RFC 1661 */
+ int timeout;
+ int max_terminate;
+ int max_configure;
+ int max_failure;
+};
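For reference, the values that sppp_lcp_init() (in if_spppsubr.c below) loads into these fields follow the restart-counter suggestions of RFC 1661 section 4.6 -- a 3 second restart timer, Max-Terminate 2 and Max-Configure 10 -- except that max_failure is initialized to 10 where the RFC suggests 5. As the init code notes, these values are used by all control protocols, not only LCP.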
+
+#define IDX_IPCP 1 /* idx into state table */
+
+struct sipcp {
+ u_long opts; /* IPCP options to send (bitfield) */
+ u_int flags;
+#define IPCP_HISADDR_SEEN 1 /* have seen his address already */
+#define IPCP_MYADDR_DYN 2 /* my address is dynamically assigned */
+#define IPCP_MYADDR_SEEN 4	/* have seen my address already */
+};
+
+#define AUTHNAMELEN 32
+#define AUTHKEYLEN 16
+
+struct sauth {
+ u_short proto; /* authentication protocol to use */
+ u_short flags;
+#define AUTHFLAG_NOCALLOUT 1 /* do not require authentication on */
+ /* callouts */
+#define AUTHFLAG_NORECHALLENGE 2 /* do not re-challenge CHAP */
+ u_char name[AUTHNAMELEN]; /* system identification name */
+ u_char secret[AUTHKEYLEN]; /* secret password */
+ u_char challenge[AUTHKEYLEN]; /* random challenge */
+};
+
+#define IDX_PAP 2
+#define IDX_CHAP 3
+
+#define IDX_COUNT (IDX_CHAP + 1) /* bump this when adding cp's! */
+
+/*
+ * Don't change the order of this. Ordering the phases this way allows
+ * for a comparison of ``pp_phase >= PHASE_AUTHENTICATE'' in order to
+ * know whether LCP is up.
+ */
+enum ppp_phase {
+ PHASE_DEAD, PHASE_ESTABLISH, PHASE_TERMINATE,
+ PHASE_AUTHENTICATE, PHASE_NETWORK
+};
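The ordering above is what lets callers test whether LCP has completed with a single ordered comparison. A minimal sketch of such a test; the helper name is invented for illustration and is not part of this header:

static int
phase_means_lcp_up(enum ppp_phase phase)
{
	/* Anything at or beyond the authenticate phase implies LCP is up. */
	return (phase >= PHASE_AUTHENTICATE);
}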
+
+struct sppp {
+ /* NB: pp_if _must_ be first */
+ struct ifnet pp_if; /* network interface data */
+ struct ifqueue pp_fastq; /* fast output queue */
+ struct ifqueue pp_cpq; /* PPP control protocol queue */
+ struct sppp *pp_next; /* next interface in keepalive list */
+ u_int pp_mode; /* major protocol modes (cisco/ppp/...) */
+ u_int pp_flags; /* sub modes */
+ u_short pp_alivecnt; /* keepalive packets counter */
+ u_short pp_loopcnt; /* loopback detection counter */
+ u_long pp_seq; /* local sequence number */
+ u_long pp_rseq; /* remote sequence number */
+ enum ppp_phase pp_phase; /* phase we're currently in */
+ int state[IDX_COUNT]; /* state machine */
+ u_char confid[IDX_COUNT]; /* id of last configuration request */
+ int rst_counter[IDX_COUNT]; /* restart counter */
+ int fail_counter[IDX_COUNT]; /* negotiation failure counter */
+ struct callout_handle ch[IDX_COUNT]; /* per-proto and if callouts */
+ struct callout_handle pap_my_to_ch; /* PAP needs one more... */
+ struct slcp lcp; /* LCP params */
+ struct sipcp ipcp; /* IPCP params */
+ struct sauth myauth; /* auth params, i'm peer */
+ struct sauth hisauth; /* auth params, i'm authenticator */
+ /*
+ * These functions are filled in by sppp_attach(), and are
+ * expected to be used by the lower layer (hardware) drivers
+ * in order to communicate the (un)availability of the
+ * communication link. Lower layer drivers that are always
+ * ready to communicate (like hardware HDLC) can shortcut
+ * pp_up from pp_tls, and pp_down from pp_tlf.
+ */
+ void (*pp_up)(struct sppp *sp);
+ void (*pp_down)(struct sppp *sp);
+ /*
+ * These functions need to be filled in by the lower layer
+ * (hardware) drivers if they request notification from the
+ * PPP layer whether the link is actually required. They
+ * correspond to the tls and tlf actions.
+ */
+ void (*pp_tls)(struct sppp *sp);
+ void (*pp_tlf)(struct sppp *sp);
+ /*
+ * These (optional) functions may be filled by the hardware
+ * driver if any notification of established connections
+ * (currently: IPCP up) is desired (pp_con) or any internal
+ * state change of the interface state machine should be
+ * signaled for monitoring purposes (pp_chg).
+ */
+ void (*pp_con)(struct sppp *sp);
+ void (*pp_chg)(struct sppp *sp, int new_state);
+ /* These two fields are for use by the lower layer */
+ void *pp_lowerp;
+ int pp_loweri;
+};
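The callback comments above define the contract between the sppp core and a lower-layer (hardware) driver. The following sketch shows how a hypothetical synchronous-serial driver could wire itself up; the driver name and softc layout are assumptions made for illustration, and only sppp_attach() and the pp_* members come from this header:

/* Hypothetical lower-layer driver glue -- illustrative sketch only. */
struct mydrv_softc {
	struct sppp sc_sppp;	/* must be first so the ifnet/sppp casts work */
	/* ... device registers, buffers, etc. ... */
};

static void
mydrv_tls(struct sppp *sp)
{
	/* The PPP layer wants the link: raise DTR, start dialing, ... */
}

static void
mydrv_tlf(struct sppp *sp)
{
	/* The PPP layer is finished with the link: drop the line. */
}

static void
mydrv_ppp_setup(struct mydrv_softc *sc)
{
	struct sppp *sp = &sc->sc_sppp;

	sp->pp_tls = mydrv_tls;
	sp->pp_tlf = mydrv_tlf;
	sppp_attach(&sp->pp_if);	/* fills in pp_up/pp_down and defaults */

	/*
	 * Later, when the hardware reports carrier changes, the driver
	 * forwards them to the PPP layer:
	 *	(sp->pp_up)(sp);	(sp->pp_down)(sp);
	 */
}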
+
+#define PP_KEEPALIVE 0x01 /* use keepalive protocol */
+ /* 0x04 was PP_TIMO */
+#define PP_CALLIN 0x08 /* we are being called */
+#define PP_NEEDAUTH 0x10 /* remote requested authentication */
+
+
+#define PP_MTU 1500 /* default/minimal MRU */
+#define PP_MAX_MRU 2048 /* maximal MRU we want to negotiate */
+
+/*
+ * Definitions to pass struct sppp data down into the kernel using the
+ * SIOC[SG]IFGENERIC ioctl interface.
+ *
+ * In order to use this, create a struct spppreq, fill in the cmd
+ * field with SPPPIOGDEFS, and put the address of this structure into
+ * the ifr_data portion of a struct ifreq. Pass this struct to a
+ * SIOCGIFGENERIC ioctl. Then replace the cmd field by SPPPIOSDEFS,
+ * modify the defs field as desired, and pass the struct ifreq
+ * to a SIOCSIFGENERIC ioctl (see the sketch following struct spppreq below).
+ */
+
+#define SPPPIOGDEFS ((caddr_t)(('S' << 24) + (1 << 16) + sizeof(struct sppp)))
+#define SPPPIOSDEFS ((caddr_t)(('S' << 24) + (2 << 16) + sizeof(struct sppp)))
+
+struct spppreq {
+ int cmd;
+ struct sppp defs;
+};
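To make the sequence described above concrete, here is a minimal userland sketch. It is illustrative only: the helper name is invented, PAP is assumed as the authentication protocol (protocol number 0xc023), and error handling is reduced to early returns.

/* Illustrative use of SPPPIOGDEFS / SPPPIOSDEFS from user space. */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_sppp.h>
#include <string.h>
#include <unistd.h>

static int
sppp_set_pap(const char *ifname, const char *name, const char *secret)
{
	struct ifreq ifr;
	struct spppreq spr;
	int s, rv;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		return (-1);
	memset(&ifr, 0, sizeof ifr);
	strncpy(ifr.ifr_name, ifname, sizeof ifr.ifr_name);
	ifr.ifr_data = (caddr_t)&spr;

	spr.cmd = (int)SPPPIOGDEFS;	/* step 1: read the current defaults */
	if (ioctl(s, SIOCGIFGENERIC, &ifr) < 0) {
		close(s);
		return (-1);
	}

	spr.defs.myauth.proto = 0xc023;	/* step 2: modify; 0xc023 is PAP */
	strncpy((char *)spr.defs.myauth.name, name, AUTHNAMELEN);
	strncpy((char *)spr.defs.myauth.secret, secret, AUTHKEYLEN);

	spr.cmd = (int)SPPPIOSDEFS;	/* step 3: write the defaults back */
	rv = ioctl(s, SIOCSIFGENERIC, &ifr);
	close(s);
	return (rv);
}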
+
+#ifdef KERNEL
+void sppp_attach (struct ifnet *ifp);
+void sppp_detach (struct ifnet *ifp);
+void sppp_input (struct ifnet *ifp, struct mbuf *m);
+int sppp_ioctl (struct ifnet *ifp, u_long cmd, void *data);
+struct mbuf *sppp_dequeue (struct ifnet *ifp);
+struct mbuf *sppp_pick(struct ifnet *ifp);
+int sppp_isempty (struct ifnet *ifp);
+void sppp_flush (struct ifnet *ifp);
+#endif
+
+#endif /* _NET_IF_SPPP_H_ */
diff --git a/sys/net/if_spppsubr.c b/sys/net/if_spppsubr.c
new file mode 100644
index 0000000..e02ef90
--- /dev/null
+++ b/sys/net/if_spppsubr.c
@@ -0,0 +1,4262 @@
+/*
+ * Synchronous PPP/Cisco link level subroutines.
+ * Keepalive protocol implemented in both Cisco and PPP modes.
+ *
+ * Copyright (C) 1994-1996 Cronyx Engineering Ltd.
+ * Author: Serge Vakulenko, <vak@cronyx.ru>
+ *
+ * Heavily revamped to conform to RFC 1661.
+ * Copyright (C) 1997, Joerg Wunsch.
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organisations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * From: Version 2.4, Thu Apr 30 17:17:21 MSD 1997
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_ipx.h"
+#endif
+
+#ifdef NetBSD1_3
+# if NetBSD1_3 > 6
+# include "opt_inet.h"
+# include "opt_inet6.h"
+# include "opt_iso.h"
+# endif
+#endif
+
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/socket.h>
+#include <sys/syslog.h>
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+#include <machine/random.h>
+#endif
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+
+#if defined (__OpenBSD__)
+#include <sys/md5k.h>
+#else
+#include <sys/md5.h>
+#endif
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/if_types.h>
+#include <net/route.h>
+
+#if defined (__NetBSD__) || defined (__OpenBSD__)
+#include <machine/cpu.h> /* XXX for softnet */
+#endif
+
+#include <machine/stdarg.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+# if defined (__FreeBSD__) || defined (__OpenBSD__)
+# include <netinet/if_ether.h>
+# else
+# include <net/ethertypes.h>
+# endif
+#else
+# error Huh? sppp without INET?
+#endif
+
+#ifdef IPX
+#include <netipx/ipx.h>
+#include <netipx/ipx_if.h>
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+#endif
+
+#ifdef ISO
+#include <netiso/argo_debug.h>
+#include <netiso/iso.h>
+#include <netiso/iso_var.h>
+#include <netiso/iso_snpac.h>
+#endif
+
+#include <net/if_sppp.h>
+
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+# define UNTIMEOUT(fun, arg, handle) untimeout(fun, arg, handle)
+# define TIMEOUT(fun, arg1, arg2, handle) handle = timeout(fun, arg1, arg2)
+# define IOCTL_CMD_T u_long
+#else
+# define UNTIMEOUT(fun, arg, handle) untimeout(fun, arg)
+# define TIMEOUT(fun, arg1, arg2, handle) timeout(fun, arg1, arg2)
+# define IOCTL_CMD_T int
+#endif
+
+#define MAXALIVECNT 3 /* max. alive packets */
+
+/*
+ * Interface flags that can be set in an ifconfig command.
+ *
+ * Setting link0 will make the link passive, i.e. it will be marked
+ * as being administratively openable, but won't be opened to begin
+ * with. Incoming calls will be answered, or subsequent calls with
+ * -link1 will cause the administrative open of the LCP layer.
+ *
+ * Setting link1 will cause the link to auto-dial only as packets
+ * arrive to be sent.
+ *
+ * Setting IFF_DEBUG will syslog the option negotiation and state
+ * transitions at level kern.debug. Note: all logs consistently look
+ * like
+ *
+ * <if-name><unit>: <proto-name> <additional info...>
+ *
+ * with <if-name><unit> being something like "bppp0", and <proto-name>
+ * being one of "lcp", "ipcp", "cisco", "chap", "pap", etc.
+ */
+
+#define IFF_PASSIVE IFF_LINK0 /* wait passively for connection */
+#define IFF_AUTO IFF_LINK1 /* auto-dial on output */
+#define IFF_CISCO	IFF_LINK2	/* use Cisco framing (keepalive protocol) */
+
+#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
+#define PPP_UI 0x03 /* Unnumbered Information */
+#define PPP_IP 0x0021 /* Internet Protocol */
+#define PPP_ISO 0x0023 /* ISO OSI Protocol */
+#define PPP_XNS 0x0025 /* Xerox NS Protocol */
+#define PPP_IPX 0x002b /* Novell IPX Protocol */
+#define PPP_LCP 0xc021 /* Link Control Protocol */
+#define PPP_PAP 0xc023 /* Password Authentication Protocol */
+#define PPP_CHAP 0xc223 /* Challenge-Handshake Auth Protocol */
+#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
+
+#define CONF_REQ 1 /* PPP configure request */
+#define CONF_ACK 2 /* PPP configure acknowledge */
+#define CONF_NAK 3 /* PPP configure negative ack */
+#define CONF_REJ 4 /* PPP configure reject */
+#define TERM_REQ 5 /* PPP terminate request */
+#define TERM_ACK 6 /* PPP terminate acknowledge */
+#define CODE_REJ 7 /* PPP code reject */
+#define PROTO_REJ 8 /* PPP protocol reject */
+#define ECHO_REQ 9 /* PPP echo request */
+#define ECHO_REPLY 10 /* PPP echo reply */
+#define DISC_REQ 11 /* PPP discard request */
+
+#define LCP_OPT_MRU 1 /* maximum receive unit */
+#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
+#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
+#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
+#define LCP_OPT_MAGIC 5 /* magic number */
+#define LCP_OPT_RESERVED 6 /* reserved */
+#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
+#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
+
+#define IPCP_OPT_ADDRESSES 1 /* both IP addresses; deprecated */
+#define IPCP_OPT_COMPRESSION 2 /* IP compression protocol (VJ) */
+#define IPCP_OPT_ADDRESS 3 /* local IP address */
+
+#define PAP_REQ 1 /* PAP name/password request */
+#define PAP_ACK 2 /* PAP acknowledge */
+#define PAP_NAK 3 /* PAP fail */
+
+#define CHAP_CHALLENGE 1 /* CHAP challenge request */
+#define CHAP_RESPONSE 2 /* CHAP challenge response */
+#define CHAP_SUCCESS 3 /* CHAP response ok */
+#define CHAP_FAILURE 4 /* CHAP response failed */
+
+#define CHAP_MD5 5 /* hash algorithm - MD5 */
+
+#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
+#define CISCO_UNICAST 0x0f /* Cisco unicast address */
+#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
+#define CISCO_ADDR_REQ 0 /* Cisco address request */
+#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
+
+/* states are named and numbered according to RFC 1661 */
+#define STATE_INITIAL 0
+#define STATE_STARTING 1
+#define STATE_CLOSED 2
+#define STATE_STOPPED 3
+#define STATE_CLOSING 4
+#define STATE_STOPPING 5
+#define STATE_REQ_SENT 6
+#define STATE_ACK_RCVD 7
+#define STATE_ACK_SENT 8
+#define STATE_OPENED 9
+
+struct ppp_header {
+ u_char address;
+ u_char control;
+ u_short protocol;
+};
+#define PPP_HEADER_LEN sizeof (struct ppp_header)
+
+struct lcp_header {
+ u_char type;
+ u_char ident;
+ u_short len;
+};
+#define LCP_HEADER_LEN sizeof (struct lcp_header)
+
+struct cisco_packet {
+ u_long type;
+ u_long par1;
+ u_long par2;
+ u_short rel;
+ u_short time0;
+ u_short time1;
+};
+#define CISCO_PACKET_LEN 18
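Note that CISCO_PACKET_LEN is the on-wire size of the keepalive payload (4 + 4 + 4 + 2 + 2 + 2 = 18 bytes) rather than sizeof (struct cisco_packet); depending on the ABI the structure may carry padding, and u_long need not be 32 bits, so sizeof could differ from the wire format.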
+
+/*
+ * We follow the spelling and capitalization of RFC 1661 here, to make
+ * it easier to compare with the standard. Please refer to this RFC in
+ * case you can't make sense of these abbreviations; it will also
+ * explain the semantics related to the various events and actions.
+ */
+struct cp {
+ u_short proto; /* PPP control protocol number */
+ u_char protoidx; /* index into state table in struct sppp */
+ u_char flags;
+#define CP_LCP 0x01 /* this is the LCP */
+#define CP_AUTH 0x02 /* this is an authentication protocol */
+#define CP_NCP		0x04	/* this is an NCP */
+#define CP_QUAL 0x08 /* this is a quality reporting protocol */
+ const char *name; /* name of this control protocol */
+ /* event handlers */
+ void (*Up)(struct sppp *sp);
+ void (*Down)(struct sppp *sp);
+ void (*Open)(struct sppp *sp);
+ void (*Close)(struct sppp *sp);
+ void (*TO)(void *sp);
+ int (*RCR)(struct sppp *sp, struct lcp_header *h, int len);
+ void (*RCN_rej)(struct sppp *sp, struct lcp_header *h, int len);
+ void (*RCN_nak)(struct sppp *sp, struct lcp_header *h, int len);
+ /* actions */
+ void (*tlu)(struct sppp *sp);
+ void (*tld)(struct sppp *sp);
+ void (*tls)(struct sppp *sp);
+ void (*tlf)(struct sppp *sp);
+ void (*scr)(struct sppp *sp);
+};
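For quick reference, the RFC 1661 names abbreviated in the members above are: TO = timeout; RCR = Receive-Configure-Request (its return value selects between the sca and scn actions, Send-Configure-Ack versus Send-Configure-Nak/Rej); RCN_rej / RCN_nak = Receive-Configure-Reject / -Nak; tlu / tld = This-Layer-Up / -Down; tls / tlf = This-Layer-Started / -Finished; scr = Send-Configure-Request.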
+
+static struct sppp *spppq;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+static struct callout_handle keepalive_ch;
+#endif
+
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+#define SPP_FMT "%s%d: "
+#define SPP_ARGS(ifp) (ifp)->if_name, (ifp)->if_unit
+#else
+#define SPP_FMT "%s: "
+#define SPP_ARGS(ifp) (ifp)->if_xname
+#endif
+
+/*
+ * The following disgusting hack gets around the problem that IP TOS
+ * can't be set yet. We want to put "interactive" traffic on a high
+ * priority queue. To decide if traffic is interactive, we check that
+ * a) it is TCP and b) one of its ports is telnet, rlogin or ftp control.
+ *
+ * XXX is this really still necessary? - joerg -
+ */
+static u_short interactive_ports[8] = {
+ 0, 513, 0, 0,
+ 0, 21, 0, 23,
+};
+#define INTERACTIVE(p) (interactive_ports[(p) & 7] == (p))
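The table works because the three ports of interest hash to distinct slots under the & 7 mask (513 & 7 == 1, 21 & 7 == 5, 23 & 7 == 7), and each slot stores the full port number so the equality test rejects any other port that collides into the same slot. A small standalone check, illustrative only and not part of the driver:

#include <assert.h>

static unsigned short interactive_ports[8] = {
	0,	513,	0,	0,
	0,	21,	0,	23,
};
#define INTERACTIVE(p) (interactive_ports[(p) & 7] == (p))

int
main(void)
{
	assert(INTERACTIVE(23));	/* telnet:  23 & 7 == 7, slot 7 holds 23  */
	assert(INTERACTIVE(21));	/* ftp ctl: 21 & 7 == 5, slot 5 holds 21  */
	assert(INTERACTIVE(513));	/* rlogin: 513 & 7 == 1, slot 1 holds 513 */
	assert(!INTERACTIVE(80));	/*  80 & 7 == 0, slot 0 holds 0 != 80     */
	assert(!INTERACTIVE(1537));	/* collides with slot 1, but 513 != 1537  */
	return (0);
}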
+
+/* almost every function needs these */
+#define STDDCL \
+ struct ifnet *ifp = &sp->pp_if; \
+ int debug = ifp->if_flags & IFF_DEBUG
+
+static int sppp_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct rtentry *rt);
+
+static void sppp_cisco_send(struct sppp *sp, int type, long par1, long par2);
+static void sppp_cisco_input(struct sppp *sp, struct mbuf *m);
+
+static void sppp_cp_input(const struct cp *cp, struct sppp *sp,
+ struct mbuf *m);
+static void sppp_cp_send(struct sppp *sp, u_short proto, u_char type,
+ u_char ident, u_short len, void *data);
+/* static void sppp_cp_timeout(void *arg); */
+static void sppp_cp_change_state(const struct cp *cp, struct sppp *sp,
+ int newstate);
+static void sppp_auth_send(const struct cp *cp,
+ struct sppp *sp, unsigned int type, unsigned int id,
+ ...);
+
+static void sppp_up_event(const struct cp *cp, struct sppp *sp);
+static void sppp_down_event(const struct cp *cp, struct sppp *sp);
+static void sppp_open_event(const struct cp *cp, struct sppp *sp);
+static void sppp_close_event(const struct cp *cp, struct sppp *sp);
+static void sppp_to_event(const struct cp *cp, struct sppp *sp);
+
+static void sppp_null(struct sppp *sp);
+
+static void sppp_lcp_init(struct sppp *sp);
+static void sppp_lcp_up(struct sppp *sp);
+static void sppp_lcp_down(struct sppp *sp);
+static void sppp_lcp_open(struct sppp *sp);
+static void sppp_lcp_close(struct sppp *sp);
+static void sppp_lcp_TO(void *sp);
+static int sppp_lcp_RCR(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_lcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_lcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_lcp_tlu(struct sppp *sp);
+static void sppp_lcp_tld(struct sppp *sp);
+static void sppp_lcp_tls(struct sppp *sp);
+static void sppp_lcp_tlf(struct sppp *sp);
+static void sppp_lcp_scr(struct sppp *sp);
+static void sppp_lcp_check_and_close(struct sppp *sp);
+static int sppp_ncp_check(struct sppp *sp);
+
+static void sppp_ipcp_init(struct sppp *sp);
+static void sppp_ipcp_up(struct sppp *sp);
+static void sppp_ipcp_down(struct sppp *sp);
+static void sppp_ipcp_open(struct sppp *sp);
+static void sppp_ipcp_close(struct sppp *sp);
+static void sppp_ipcp_TO(void *sp);
+static int sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipcp_tlu(struct sppp *sp);
+static void sppp_ipcp_tld(struct sppp *sp);
+static void sppp_ipcp_tls(struct sppp *sp);
+static void sppp_ipcp_tlf(struct sppp *sp);
+static void sppp_ipcp_scr(struct sppp *sp);
+
+static void sppp_pap_input(struct sppp *sp, struct mbuf *m);
+static void sppp_pap_init(struct sppp *sp);
+static void sppp_pap_open(struct sppp *sp);
+static void sppp_pap_close(struct sppp *sp);
+static void sppp_pap_TO(void *sp);
+static void sppp_pap_my_TO(void *sp);
+static void sppp_pap_tlu(struct sppp *sp);
+static void sppp_pap_tld(struct sppp *sp);
+static void sppp_pap_scr(struct sppp *sp);
+
+static void sppp_chap_input(struct sppp *sp, struct mbuf *m);
+static void sppp_chap_init(struct sppp *sp);
+static void sppp_chap_open(struct sppp *sp);
+static void sppp_chap_close(struct sppp *sp);
+static void sppp_chap_TO(void *sp);
+static void sppp_chap_tlu(struct sppp *sp);
+static void sppp_chap_tld(struct sppp *sp);
+static void sppp_chap_scr(struct sppp *sp);
+
+static const char *sppp_auth_type_name(u_short proto, u_char type);
+static const char *sppp_cp_type_name(u_char type);
+static const char *sppp_dotted_quad(u_long addr);
+static const char *sppp_ipcp_opt_name(u_char opt);
+static const char *sppp_lcp_opt_name(u_char opt);
+static const char *sppp_phase_name(enum ppp_phase phase);
+static const char *sppp_proto_name(u_short proto);
+static const char *sppp_state_name(int state);
+static int sppp_params(struct sppp *sp, u_long cmd, void *data);
+static int sppp_strnlen(u_char *p, int max);
+static void sppp_get_ip_addrs(struct sppp *sp, u_long *src, u_long *dst,
+ u_long *srcmask);
+static void sppp_keepalive(void *dummy);
+static void sppp_phase_network(struct sppp *sp);
+static void sppp_print_bytes(const u_char *p, u_short len);
+static void sppp_print_string(const char *p, u_short len);
+static void sppp_qflush(struct ifqueue *ifq);
+static void sppp_set_ip_addr(struct sppp *sp, u_long src);
+
+/* our control protocol descriptors */
+static const struct cp lcp = {
+ PPP_LCP, IDX_LCP, CP_LCP, "lcp",
+ sppp_lcp_up, sppp_lcp_down, sppp_lcp_open, sppp_lcp_close,
+ sppp_lcp_TO, sppp_lcp_RCR, sppp_lcp_RCN_rej, sppp_lcp_RCN_nak,
+ sppp_lcp_tlu, sppp_lcp_tld, sppp_lcp_tls, sppp_lcp_tlf,
+ sppp_lcp_scr
+};
+
+static const struct cp ipcp = {
+ PPP_IPCP, IDX_IPCP, CP_NCP, "ipcp",
+ sppp_ipcp_up, sppp_ipcp_down, sppp_ipcp_open, sppp_ipcp_close,
+ sppp_ipcp_TO, sppp_ipcp_RCR, sppp_ipcp_RCN_rej, sppp_ipcp_RCN_nak,
+ sppp_ipcp_tlu, sppp_ipcp_tld, sppp_ipcp_tls, sppp_ipcp_tlf,
+ sppp_ipcp_scr
+};
+
+static const struct cp pap = {
+ PPP_PAP, IDX_PAP, CP_AUTH, "pap",
+ sppp_null, sppp_null, sppp_pap_open, sppp_pap_close,
+ sppp_pap_TO, 0, 0, 0,
+ sppp_pap_tlu, sppp_pap_tld, sppp_null, sppp_null,
+ sppp_pap_scr
+};
+
+static const struct cp chap = {
+ PPP_CHAP, IDX_CHAP, CP_AUTH, "chap",
+ sppp_null, sppp_null, sppp_chap_open, sppp_chap_close,
+ sppp_chap_TO, 0, 0, 0,
+ sppp_chap_tlu, sppp_chap_tld, sppp_null, sppp_null,
+ sppp_chap_scr
+};
+
+static const struct cp *cps[IDX_COUNT] = {
+ &lcp, /* IDX_LCP */
+ &ipcp, /* IDX_IPCP */
+ &pap, /* IDX_PAP */
+ &chap, /* IDX_CHAP */
+};
+
+
+ /*
+ * Exported functions, comprising our interface to the lower layer.
+ */
+
+/*
+ * Process the received packet.
+ */
+void
+sppp_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct ppp_header *h;
+ struct ifqueue *inq = 0;
+ int s;
+ struct sppp *sp = (struct sppp *)ifp;
+ int debug = ifp->if_flags & IFF_DEBUG;
+
+ if (ifp->if_flags & IFF_UP)
+ /* Count received bytes, add FCS and one flag */
+ ifp->if_ibytes += m->m_pkthdr.len + 3;
+
+ if (m->m_pkthdr.len <= PPP_HEADER_LEN) {
+ /* Too small packet, drop it. */
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "input packet is too small, %d bytes\n",
+ SPP_ARGS(ifp), m->m_pkthdr.len);
+ drop:
+ ++ifp->if_ierrors;
+ ++ifp->if_iqdrops;
+ m_freem (m);
+ return;
+ }
+
+ /* Get PPP header. */
+ h = mtod (m, struct ppp_header*);
+ m_adj (m, PPP_HEADER_LEN);
+
+ switch (h->address) {
+ case PPP_ALLSTATIONS:
+ if (h->control != PPP_UI)
+ goto invalid;
+ if (sp->pp_mode == IFF_CISCO) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "PPP packet in Cisco mode "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "rejecting protocol "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ if (sp->state[IDX_LCP] == STATE_OPENED)
+ sppp_cp_send (sp, PPP_LCP, PROTO_REJ,
+ ++sp->pp_seq, m->m_pkthdr.len + 2,
+ &h->protocol);
+ ++ifp->if_noproto;
+ goto drop;
+ case PPP_LCP:
+ sppp_cp_input(&lcp, sp, m);
+ m_freem (m);
+ return;
+ case PPP_PAP:
+ if (sp->pp_phase >= PHASE_AUTHENTICATE)
+ sppp_pap_input(sp, m);
+ m_freem (m);
+ return;
+ case PPP_CHAP:
+ if (sp->pp_phase >= PHASE_AUTHENTICATE)
+ sppp_chap_input(sp, m);
+ m_freem (m);
+ return;
+#ifdef INET
+ case PPP_IPCP:
+ if (sp->pp_phase == PHASE_NETWORK)
+ sppp_cp_input(&ipcp, sp, m);
+ m_freem (m);
+ return;
+ case PPP_IP:
+ if (sp->state[IDX_IPCP] == STATE_OPENED) {
+ schednetisr (NETISR_IP);
+ inq = &ipintrq;
+ }
+ break;
+#endif
+#ifdef IPX
+ case PPP_IPX:
+ /* IPX IPXCP not implemented yet */
+ if (sp->pp_phase == PHASE_NETWORK) {
+ schednetisr (NETISR_IPX);
+ inq = &ipxintrq;
+ }
+ break;
+#endif
+#ifdef NS
+ case PPP_XNS:
+ /* XNS IDPCP not implemented yet */
+ if (sp->pp_phase == PHASE_NETWORK) {
+ schednetisr (NETISR_NS);
+ inq = &nsintrq;
+ }
+ break;
+#endif
+#ifdef ISO
+ case PPP_ISO:
+ /* OSI NLCP not implemented yet */
+ if (sp->pp_phase == PHASE_NETWORK) {
+ schednetisr (NETISR_ISO);
+ inq = &clnlintrq;
+ }
+ break;
+#endif
+ }
+ break;
+ case CISCO_MULTICAST:
+ case CISCO_UNICAST:
+ /* Don't check the control field here (RFC 1547). */
+ if (sp->pp_mode != IFF_CISCO) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "Cisco packet in PPP mode "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ ++ifp->if_noproto;
+ goto invalid;
+ case CISCO_KEEPALIVE:
+ sppp_cisco_input ((struct sppp*) ifp, m);
+ m_freem (m);
+ return;
+#ifdef INET
+ case ETHERTYPE_IP:
+ schednetisr (NETISR_IP);
+ inq = &ipintrq;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ schednetisr (NETISR_IPV6);
+ inq = &ip6intrq;
+ break;
+#endif
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ schednetisr (NETISR_IPX);
+ inq = &ipxintrq;
+ break;
+#endif
+#ifdef NS
+ case ETHERTYPE_NS:
+ schednetisr (NETISR_NS);
+ inq = &nsintrq;
+ break;
+#endif
+ }
+ break;
+ default: /* Invalid PPP packet. */
+ invalid:
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "invalid input packet "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ goto drop;
+ }
+
+ if (! (ifp->if_flags & IFF_UP) || ! inq)
+ goto drop;
+
+ /* Check queue. */
+ s = splimp();
+ if (IF_QFULL (inq)) {
+ /* Queue overflow. */
+ IF_DROP(inq);
+ splx(s);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "protocol queue overflow\n",
+ SPP_ARGS(ifp));
+ goto drop;
+ }
+ IF_ENQUEUE(inq, m);
+ splx(s);
+}
+
+/*
+ * Enqueue transmit packet.
+ */
+static int
+sppp_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct rtentry *rt)
+{
+ struct sppp *sp = (struct sppp*) ifp;
+ struct ppp_header *h;
+ struct ifqueue *ifq;
+ int s, rv = 0;
+ int debug = ifp->if_flags & IFF_DEBUG;
+
+ s = splimp();
+
+ if ((ifp->if_flags & IFF_UP) == 0 ||
+ (ifp->if_flags & (IFF_RUNNING | IFF_AUTO)) == 0) {
+ m_freem (m);
+ splx (s);
+ return (ENETDOWN);
+ }
+
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_AUTO)) == IFF_AUTO) {
+ /*
+ * Interface is not yet running, but auto-dial. Need
+ * to start LCP for it.
+ */
+ ifp->if_flags |= IFF_RUNNING;
+ splx(s);
+ lcp.Open(sp);
+ s = splimp();
+ }
+
+ ifq = &ifp->if_snd;
+#ifdef INET
+ if (dst->sa_family == AF_INET) {
+ /* XXX Check mbuf length here? */
+ struct ip *ip = mtod (m, struct ip*);
+		/* ip_hl counts 32-bit words */
+		struct tcphdr *tcp = (struct tcphdr*) ((u_int32_t*)ip + ip->ip_hl);
+
+ /*
+		 * When using dynamic local IP address assignment (i.e.
+		 * 0.0.0.0 as the local address), the first TCP session
+		 * will not connect: the local TCP checksum is computed
+		 * using 0.0.0.0, which later becomes our real IP address,
+		 * so the checksum verified by the remote end turns out
+		 * invalid.  Therefore we
+		 * - don't let packets with src IP address 0 through, and
+		 * - flag TCP packets with src IP 0 as an error.
+ */
+
+ if(ip->ip_src.s_addr == INADDR_ANY) /* -hm */
+ {
+ m_freem(m);
+ splx(s);
+ if(ip->ip_p == IPPROTO_TCP)
+ return(EADDRNOTAVAIL);
+ else
+ return(0);
+ }
+
+ /*
+ * Put low delay, telnet, rlogin and ftp control packets
+ * in front of the queue.
+ */
+ if (IF_QFULL (&sp->pp_fastq))
+ ;
+ else if (ip->ip_tos & IPTOS_LOWDELAY)
+ ifq = &sp->pp_fastq;
+ else if (m->m_len < sizeof *ip + sizeof *tcp)
+ ;
+ else if (ip->ip_p != IPPROTO_TCP)
+ ;
+ else if (INTERACTIVE (ntohs (tcp->th_sport)))
+ ifq = &sp->pp_fastq;
+ else if (INTERACTIVE (ntohs (tcp->th_dport)))
+ ifq = &sp->pp_fastq;
+ }
+#endif
+
+ /*
+	 * Prepend the general data packet PPP header.
+ */
+ M_PREPEND (m, PPP_HEADER_LEN, M_DONTWAIT);
+ if (! m) {
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "no memory for transmit header\n",
+ SPP_ARGS(ifp));
+ ++ifp->if_oerrors;
+ splx (s);
+ return (ENOBUFS);
+ }
+ /*
+ * May want to check size of packet
+ * (albeit due to the implementation it's always enough)
+ */
+ h = mtod (m, struct ppp_header*);
+ if (sp->pp_mode == IFF_CISCO) {
+ h->address = CISCO_UNICAST; /* unicast address */
+ h->control = 0;
+ } else {
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ }
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET: /* Internet Protocol */
+ if (sp->pp_mode == IFF_CISCO)
+ h->protocol = htons (ETHERTYPE_IP);
+ else {
+ /*
+ * Don't choke with an ENETDOWN early. It's
+ * possible that we just started dialing out,
+ * so don't drop the packet immediately. If
+ * we notice that we run out of buffer space
+ * below, we will however remember that we are
+ * not ready to carry IP packets, and return
+ * ENETDOWN, as opposed to ENOBUFS.
+ */
+ h->protocol = htons(PPP_IP);
+ if (sp->state[IDX_IPCP] != STATE_OPENED)
+ rv = ENETDOWN;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6: /* Internet Protocol */
+ if (sp->pp_mode == IFF_CISCO)
+ h->protocol = htons (ETHERTYPE_IPV6);
+ else {
+ goto nosupport;
+ }
+ break;
+#endif
+#ifdef NS
+ case AF_NS: /* Xerox NS Protocol */
+ h->protocol = htons (sp->pp_mode == IFF_CISCO ?
+ ETHERTYPE_NS : PPP_XNS);
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX: /* Novell IPX Protocol */
+ h->protocol = htons (sp->pp_mode == IFF_CISCO ?
+ ETHERTYPE_IPX : PPP_IPX);
+ break;
+#endif
+#ifdef ISO
+ case AF_ISO: /* ISO OSI Protocol */
+ if (sp->pp_mode == IFF_CISCO)
+ goto nosupport;
+ h->protocol = htons (PPP_ISO);
+ break;
+#endif
+nosupport:
+ default:
+ m_freem (m);
+ ++ifp->if_oerrors;
+ splx (s);
+ return (EAFNOSUPPORT);
+ }
+
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ if (IF_QFULL (ifq)) {
+ IF_DROP (&ifp->if_snd);
+ m_freem (m);
+ ++ifp->if_oerrors;
+ splx (s);
+ return (rv? rv: ENOBUFS);
+ }
+	/*
+	 * Count output bytes before handing the mbuf over; if_start()
+	 * may consume and free it immediately.  The packet length
+	 * includes header, FCS and 1 flag, according to RFC 1333.
+	 */
+	ifp->if_obytes += m->m_pkthdr.len + 3;
+	IF_ENQUEUE (ifq, m);
+	if (! (ifp->if_flags & IFF_OACTIVE))
+		(*ifp->if_start) (ifp);
+ splx (s);
+ return (0);
+}
+
+void
+sppp_attach(struct ifnet *ifp)
+{
+ struct sppp *sp = (struct sppp*) ifp;
+
+ /* Initialize keepalive handler. */
+ if (! spppq)
+ TIMEOUT(sppp_keepalive, 0, hz * 10, keepalive_ch);
+
+ /* Insert new entry into the keepalive list. */
+ sp->pp_next = spppq;
+ spppq = sp;
+
+ sp->pp_if.if_mtu = PP_MTU;
+ sp->pp_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
+ sp->pp_if.if_type = IFT_PPP;
+ sp->pp_if.if_output = sppp_output;
+#if 0
+ sp->pp_flags = PP_KEEPALIVE;
+#endif
+ sp->pp_fastq.ifq_maxlen = 32;
+ sp->pp_cpq.ifq_maxlen = 20;
+ sp->pp_loopcnt = 0;
+ sp->pp_alivecnt = 0;
+ sp->pp_seq = 0;
+ sp->pp_rseq = 0;
+ sp->pp_phase = PHASE_DEAD;
+ sp->pp_up = lcp.Up;
+ sp->pp_down = lcp.Down;
+
+ sppp_lcp_init(sp);
+ sppp_ipcp_init(sp);
+ sppp_pap_init(sp);
+ sppp_chap_init(sp);
+}
+
+void
+sppp_detach(struct ifnet *ifp)
+{
+ struct sppp **q, *p, *sp = (struct sppp*) ifp;
+ int i;
+
+ /* Remove the entry from the keepalive list. */
+ for (q = &spppq; (p = *q); q = &p->pp_next)
+ if (p == sp) {
+ *q = p->pp_next;
+ break;
+ }
+
+ /* Stop keepalive handler. */
+ if (! spppq)
+ UNTIMEOUT(sppp_keepalive, 0, keepalive_ch);
+
+ for (i = 0; i < IDX_COUNT; i++)
+ UNTIMEOUT((cps[i])->TO, (void *)sp, sp->ch[i]);
+ UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch);
+}
+
+/*
+ * Flush the interface output queue.
+ */
+void
+sppp_flush(struct ifnet *ifp)
+{
+ struct sppp *sp = (struct sppp*) ifp;
+
+ sppp_qflush (&sp->pp_if.if_snd);
+ sppp_qflush (&sp->pp_fastq);
+ sppp_qflush (&sp->pp_cpq);
+}
+
+/*
+ * Check if the output queue is empty.
+ */
+int
+sppp_isempty(struct ifnet *ifp)
+{
+ struct sppp *sp = (struct sppp*) ifp;
+ int empty, s;
+
+ s = splimp();
+ empty = !sp->pp_fastq.ifq_head && !sp->pp_cpq.ifq_head &&
+ !sp->pp_if.if_snd.ifq_head;
+ splx(s);
+ return (empty);
+}
+
+/*
+ * Get next packet to send.
+ */
+struct mbuf *
+sppp_dequeue(struct ifnet *ifp)
+{
+ struct sppp *sp = (struct sppp*) ifp;
+ struct mbuf *m;
+ int s;
+
+ s = splimp();
+ /*
+ * Process only the control protocol queue until we have at
+ * least one NCP open.
+ *
+	 * Always serve all three queues in Cisco mode.
+ */
+ IF_DEQUEUE(&sp->pp_cpq, m);
+ if (m == NULL &&
+ (sppp_ncp_check(sp) || sp->pp_mode == IFF_CISCO)) {
+ IF_DEQUEUE(&sp->pp_fastq, m);
+ if (m == NULL)
+ IF_DEQUEUE (&sp->pp_if.if_snd, m);
+ }
+ splx(s);
+ return m;
+}
+
+/*
+ * Pick the next packet, do not remove it from the queue.
+ */
+struct mbuf *
+sppp_pick(struct ifnet *ifp)
+{
+ struct sppp *sp = (struct sppp*)ifp;
+ struct mbuf *m;
+ int s;
+
+ s= splimp ();
+
+ m = sp->pp_cpq.ifq_head;
+ if (m == NULL &&
+ (sp->pp_phase == PHASE_NETWORK || sp->pp_mode == IFF_CISCO))
+ if ((m = sp->pp_fastq.ifq_head) == NULL)
+ m = sp->pp_if.if_snd.ifq_head;
+ splx (s);
+ return (m);
+}
+
+/*
+ * Process an ioctl request. Called on low priority level.
+ */
+int
+sppp_ioctl(struct ifnet *ifp, IOCTL_CMD_T cmd, void *data)
+{
+ struct ifreq *ifr = (struct ifreq*) data;
+ struct sppp *sp = (struct sppp*) ifp;
+ int s, rv, going_up, going_down, newmode;
+
+ s = splimp();
+ rv = 0;
+ switch (cmd) {
+ case SIOCAIFADDR:
+ case SIOCSIFDSTADDR:
+ break;
+
+ case SIOCSIFADDR:
+ if_up(ifp);
+ /* fall through... */
+
+ case SIOCSIFFLAGS:
+ going_up = ifp->if_flags & IFF_UP &&
+ (ifp->if_flags & IFF_RUNNING) == 0;
+ going_down = (ifp->if_flags & IFF_UP) == 0 &&
+ ifp->if_flags & IFF_RUNNING;
+
+ newmode = ifp->if_flags & IFF_PASSIVE;
+ if (!newmode)
+ newmode = ifp->if_flags & IFF_AUTO;
+ if (!newmode)
+ newmode = ifp->if_flags & IFF_CISCO;
+ ifp->if_flags &= ~(IFF_PASSIVE | IFF_AUTO | IFF_CISCO);
+ ifp->if_flags |= newmode;
+
+ if (newmode != sp->pp_mode) {
+ going_down = 1;
+ if (!going_up)
+ going_up = ifp->if_flags & IFF_RUNNING;
+ }
+
+ if (going_down) {
+ if (sp->pp_mode != IFF_CISCO)
+ lcp.Close(sp);
+ else if (sp->pp_tlf)
+ (sp->pp_tlf)(sp);
+ sppp_flush(ifp);
+ ifp->if_flags &= ~IFF_RUNNING;
+ sp->pp_mode = newmode;
+ }
+
+ if (going_up) {
+ if (sp->pp_mode != IFF_CISCO)
+ lcp.Close(sp);
+ sp->pp_mode = newmode;
+ if (sp->pp_mode == 0) {
+ ifp->if_flags |= IFF_RUNNING;
+ lcp.Open(sp);
+ }
+ if (sp->pp_mode == IFF_CISCO) {
+ if (sp->pp_tls)
+ (sp->pp_tls)(sp);
+ ifp->if_flags |= IFF_RUNNING;
+ }
+ }
+
+ break;
+
+#ifdef SIOCSIFMTU
+#ifndef ifr_mtu
+#define ifr_mtu ifr_metric
+#endif
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < 128 || ifr->ifr_mtu > sp->lcp.their_mru)
+ return (EINVAL);
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+#endif
+#ifdef SLIOCSETMTU
+ case SLIOCSETMTU:
+ if (*(short*)data < 128 || *(short*)data > sp->lcp.their_mru)
+ return (EINVAL);
+ ifp->if_mtu = *(short*)data;
+ break;
+#endif
+#ifdef SIOCGIFMTU
+ case SIOCGIFMTU:
+ ifr->ifr_mtu = ifp->if_mtu;
+ break;
+#endif
+#ifdef SLIOCGETMTU
+ case SLIOCGETMTU:
+ *(short*)data = ifp->if_mtu;
+ break;
+#endif
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCGIFGENERIC:
+ case SIOCSIFGENERIC:
+ rv = sppp_params(sp, cmd, data);
+ break;
+
+ default:
+ rv = ENOTTY;
+ }
+ splx(s);
+ return rv;
+}
+
+
+ /*
+ * Cisco framing implementation.
+ */
+
+/*
+ * Handle incoming Cisco keepalive protocol packets.
+ */
+static void
+sppp_cisco_input(struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct cisco_packet *h;
+ u_long me, mymask;
+
+ if (m->m_pkthdr.len < CISCO_PACKET_LEN) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "cisco invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), m->m_pkthdr.len);
+ return;
+ }
+ h = mtod (m, struct cisco_packet*);
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "cisco input: %d bytes "
+ "<0x%lx 0x%lx 0x%lx 0x%x 0x%x-0x%x>\n",
+ SPP_ARGS(ifp), m->m_pkthdr.len,
+ (u_long)ntohl (h->type), (u_long)h->par1, (u_long)h->par2, (u_int)h->rel,
+ (u_int)h->time0, (u_int)h->time1);
+ switch (ntohl (h->type)) {
+ default:
+ if (debug)
+ addlog(SPP_FMT "cisco unknown packet type: 0x%lx\n",
+ SPP_ARGS(ifp), (u_long)ntohl (h->type));
+ break;
+ case CISCO_ADDR_REPLY:
+ /* Reply on address request, ignore */
+ break;
+ case CISCO_KEEPALIVE_REQ:
+ sp->pp_alivecnt = 0;
+ sp->pp_rseq = ntohl (h->par1);
+ if (sp->pp_seq == sp->pp_rseq) {
+ /* Local and remote sequence numbers are equal.
+ * Probably, the line is in loopback mode. */
+ if (sp->pp_loopcnt >= MAXALIVECNT) {
+ printf (SPP_FMT "loopback\n",
+ SPP_ARGS(ifp));
+ sp->pp_loopcnt = 0;
+ if (ifp->if_flags & IFF_UP) {
+ if_down (ifp);
+ sppp_qflush (&sp->pp_cpq);
+ }
+ }
+ ++sp->pp_loopcnt;
+
+ /* Generate new local sequence number */
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ sp->pp_seq = random();
+#else
+ sp->pp_seq ^= time.tv_sec ^ time.tv_usec;
+#endif
+ break;
+ }
+ sp->pp_loopcnt = 0;
+ if (! (ifp->if_flags & IFF_UP) &&
+ (ifp->if_flags & IFF_RUNNING)) {
+ if_up(ifp);
+ printf (SPP_FMT "up\n", SPP_ARGS(ifp));
+ }
+ break;
+ case CISCO_ADDR_REQ:
+ sppp_get_ip_addrs(sp, &me, 0, &mymask);
+ if (me != 0L)
+ sppp_cisco_send(sp, CISCO_ADDR_REPLY, me, mymask);
+ break;
+ }
+}
+
+/*
+ * Send Cisco keepalive packet.
+ */
+static void
+sppp_cisco_send(struct sppp *sp, int type, long par1, long par2)
+{
+ STDDCL;
+ struct ppp_header *h;
+ struct cisco_packet *ch;
+ struct mbuf *m;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ struct timeval tv;
+#else
+ u_long t = (time.tv_sec - boottime.tv_sec) * 1000;
+#endif
+
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ getmicrouptime(&tv);
+#endif
+
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + CISCO_PACKET_LEN;
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, struct ppp_header*);
+ h->address = CISCO_MULTICAST;
+ h->control = 0;
+ h->protocol = htons (CISCO_KEEPALIVE);
+
+ ch = (struct cisco_packet*) (h + 1);
+ ch->type = htonl (type);
+ ch->par1 = htonl (par1);
+ ch->par2 = htonl (par2);
+ ch->rel = -1;
+
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ ch->time0 = htons ((u_short) (tv.tv_sec >> 16));
+ ch->time1 = htons ((u_short) tv.tv_sec);
+#else
+ ch->time0 = htons ((u_short) (t >> 16));
+ ch->time1 = htons ((u_short) t);
+#endif
+
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "cisco output: <0x%lx 0x%lx 0x%lx 0x%x 0x%x-0x%x>\n",
+ SPP_ARGS(ifp), (u_long)ntohl (ch->type), (u_long)ch->par1,
+ (u_long)ch->par2, (u_int)ch->rel, (u_int)ch->time0, (u_int)ch->time1);
+
+	if (IF_QFULL (&sp->pp_cpq)) {
+		IF_DROP (&sp->pp_fastq);
+		IF_DROP (&ifp->if_snd);
+		m_freem (m);
+		return;
+	}
+	/* Account for the frame before if_start() can consume the mbuf. */
+	ifp->if_obytes += m->m_pkthdr.len + 3;
+	IF_ENQUEUE (&sp->pp_cpq, m);
+	if (! (ifp->if_flags & IFF_OACTIVE))
+		(*ifp->if_start) (ifp);
+}
+
+ /*
+ * PPP protocol implementation.
+ */
+
+/*
+ * Send PPP control protocol packet.
+ */
+static void
+sppp_cp_send(struct sppp *sp, u_short proto, u_char type,
+ u_char ident, u_short len, void *data)
+{
+ STDDCL;
+ struct ppp_header *h;
+ struct lcp_header *lh;
+ struct mbuf *m;
+
+ if (len > MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN)
+ len = MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN;
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + LCP_HEADER_LEN + len;
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, struct ppp_header*);
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ h->protocol = htons (proto); /* Link Control Protocol */
+
+ lh = (struct lcp_header*) (h + 1);
+ lh->type = type;
+ lh->ident = ident;
+ lh->len = htons (LCP_HEADER_LEN + len);
+ if (len)
+ bcopy (data, lh+1, len);
+
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "%s output <%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_proto_name(proto),
+ sppp_cp_type_name (lh->type), lh->ident,
+ ntohs (lh->len));
+ sppp_print_bytes ((u_char*) (lh+1), len);
+ addlog(">\n");
+ }
+	if (IF_QFULL (&sp->pp_cpq)) {
+		IF_DROP (&sp->pp_fastq);
+		IF_DROP (&ifp->if_snd);
+		m_freem (m);
+		++ifp->if_oerrors;
+		return;
+	}
+	/* Account for the frame before if_start() can consume the mbuf. */
+	ifp->if_obytes += m->m_pkthdr.len + 3;
+	IF_ENQUEUE (&sp->pp_cpq, m);
+	if (! (ifp->if_flags & IFF_OACTIVE))
+		(*ifp->if_start) (ifp);
+}
+
+/*
+ * Handle incoming PPP control protocol packets.
+ */
+static void
+sppp_cp_input(const struct cp *cp, struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct lcp_header *h;
+ int len = m->m_pkthdr.len;
+ int rv;
+ u_char *p;
+
+ if (len < 4) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "%s invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), cp->name, len);
+ return;
+ }
+ h = mtod (m, struct lcp_header*);
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "%s input(%s): <%s id=0x%x len=%d",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]),
+ sppp_cp_type_name (h->type), h->ident, ntohs (h->len));
+ sppp_print_bytes ((u_char*) (h+1), len-4);
+ addlog(">\n");
+ }
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ p = (u_char *)(h + 1);
+ switch (h->type) {
+ case CONF_REQ:
+ if (len < 4) {
+ if (debug)
+ addlog(SPP_FMT "%s invalid conf-req length %d\n",
+ SPP_ARGS(ifp), cp->name,
+ len);
+ ++ifp->if_ierrors;
+ break;
+ }
+ /* handle states where RCR doesn't get a SCA/SCN */
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ return;
+ case STATE_CLOSED:
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident,
+ 0, 0);
+ return;
+ }
+ rv = (cp->RCR)(sp, h, len);
+ switch (sp->state[cp->protoidx]) {
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ (cp->scr)(sp);
+ /* fall through... */
+ case STATE_ACK_SENT:
+ case STATE_REQ_SENT:
+ sppp_cp_change_state(cp, sp, rv?
+ STATE_ACK_SENT: STATE_REQ_SENT);
+ break;
+ case STATE_STOPPED:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, rv?
+ STATE_ACK_SENT: STATE_REQ_SENT);
+ break;
+ case STATE_ACK_RCVD:
+ if (rv) {
+ sppp_cp_change_state(cp, sp, STATE_OPENED);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s tlu\n",
+ SPP_ARGS(ifp),
+ cp->name);
+ (cp->tlu)(sp);
+ } else
+ sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case CONF_ACK:
+ if (h->ident != sp->confid[cp->protoidx]) {
+ if (debug)
+ addlog(SPP_FMT "%s id mismatch 0x%x != 0x%x\n",
+ SPP_ARGS(ifp), cp->name,
+ h->ident, sp->confid[cp->protoidx]);
+ ++ifp->if_ierrors;
+ break;
+ }
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
+ break;
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ break;
+ case STATE_REQ_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ /* fall through */
+ case STATE_ACK_RCVD:
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_ACK_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ sppp_cp_change_state(cp, sp, STATE_OPENED);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s tlu\n",
+ SPP_ARGS(ifp), cp->name);
+ (cp->tlu)(sp);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case CONF_NAK:
+ case CONF_REJ:
+ if (h->ident != sp->confid[cp->protoidx]) {
+ if (debug)
+ addlog(SPP_FMT "%s id mismatch 0x%x != 0x%x\n",
+ SPP_ARGS(ifp), cp->name,
+ h->ident, sp->confid[cp->protoidx]);
+ ++ifp->if_ierrors;
+ break;
+ }
+ if (h->type == CONF_NAK)
+ (cp->RCN_nak)(sp, h, len);
+ else /* CONF_REJ */
+ (cp->RCN_rej)(sp, h, len);
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
+ break;
+ case STATE_REQ_SENT:
+ case STATE_ACK_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ (cp->scr)(sp);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ /* fall through */
+ case STATE_ACK_RCVD:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ (cp->scr)(sp);
+ break;
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+
+ case TERM_REQ:
+ switch (sp->state[cp->protoidx]) {
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ /* fall through */
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ sta:
+ /* Send Terminate-Ack packet. */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s send terminate-ack\n",
+ SPP_ARGS(ifp), cp->name);
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ sp->rst_counter[cp->protoidx] = 0;
+ sppp_cp_change_state(cp, sp, STATE_STOPPING);
+ goto sta;
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case TERM_ACK:
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_REQ_SENT:
+ case STATE_ACK_SENT:
+ break;
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_STOPPING:
+ sppp_cp_change_state(cp, sp, STATE_STOPPED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_ACK_RCVD:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case CODE_REJ:
+ case PROTO_REJ:
+ /* XXX catastrophic rejects (RXJ-) aren't handled yet. */
+ log(LOG_INFO,
+ SPP_FMT "%s: ignoring RXJ (%s) for proto 0x%x, "
+ "danger will robinson\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type), ntohs(*((u_short *)p)));
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_REQ_SENT:
+ case STATE_ACK_SENT:
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ case STATE_OPENED:
+ break;
+ case STATE_ACK_RCVD:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case DISC_REQ:
+ if (cp->proto != PPP_LCP)
+ goto illegal;
+ /* Discard the packet. */
+ break;
+ case ECHO_REQ:
+ if (cp->proto != PPP_LCP)
+ goto illegal;
+ if (sp->state[cp->protoidx] != STATE_OPENED) {
+ if (debug)
+ addlog(SPP_FMT "lcp echo req but lcp closed\n",
+ SPP_ARGS(ifp));
+ ++ifp->if_ierrors;
+ break;
+ }
+ if (len < 8) {
+ if (debug)
+ addlog(SPP_FMT "invalid lcp echo request "
+ "packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ break;
+ }
+ if ((sp->lcp.opts & (1 << LCP_OPT_MAGIC)) &&
+ ntohl (*(long*)(h+1)) == sp->lcp.magic) {
+ /* Line loopback mode detected. */
+ printf(SPP_FMT "loopback\n", SPP_ARGS(ifp));
+ if_down (ifp);
+ sppp_qflush (&sp->pp_cpq);
+
+ /* Shut down the PPP link. */
+ /* XXX */
+ lcp.Down(sp);
+ lcp.Up(sp);
+ break;
+ }
+ *(long*)(h+1) = htonl (sp->lcp.magic);
+ if (debug)
+ addlog(SPP_FMT "got lcp echo req, sending echo rep\n",
+ SPP_ARGS(ifp));
+ sppp_cp_send (sp, PPP_LCP, ECHO_REPLY, h->ident, len-4, h+1);
+ break;
+ case ECHO_REPLY:
+ if (cp->proto != PPP_LCP)
+ goto illegal;
+ if (h->ident != sp->lcp.echoid) {
+ ++ifp->if_ierrors;
+ break;
+ }
+ if (len < 8) {
+ if (debug)
+ addlog(SPP_FMT "lcp invalid echo reply "
+ "packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ break;
+ }
+ if (debug)
+ addlog(SPP_FMT "lcp got echo rep\n",
+ SPP_ARGS(ifp));
+ if (!(sp->lcp.opts & (1 << LCP_OPT_MAGIC)) ||
+ ntohl (*(long*)(h+1)) != sp->lcp.magic)
+ sp->pp_alivecnt = 0;
+ break;
+ default:
+ /* Unknown packet type -- send Code-Reject packet. */
+ illegal:
+ if (debug)
+ addlog(SPP_FMT "%s send code-rej for 0x%x\n",
+ SPP_ARGS(ifp), cp->name, h->type);
+ sppp_cp_send(sp, cp->proto, CODE_REJ, ++sp->pp_seq,
+ m->m_pkthdr.len, h);
+ ++ifp->if_ierrors;
+ }
+}
+
+
+/*
+ * The generic part of all Up/Down/Open/Close/TO event handlers.
+ * Basically, the state transition handling in the automaton.
+ */
+static void
+sppp_up_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s up(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_INITIAL:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ break;
+ case STATE_STARTING:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal up in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+ }
+}
+
+static void
+sppp_down_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s down(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_INITIAL);
+ break;
+ case STATE_STOPPED:
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ (cp->tls)(sp);
+ break;
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal down in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+ }
+}
+
+
+static void
+sppp_open_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s open(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_INITIAL:
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ (cp->tls)(sp);
+ break;
+ case STATE_STARTING:
+ break;
+ case STATE_CLOSED:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_STOPPED:
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ case STATE_OPENED:
+ break;
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_STOPPING);
+ break;
+ }
+}
+
+
+static void
+sppp_close_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s close(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_INITIAL:
+ case STATE_CLOSED:
+ case STATE_CLOSING:
+ break;
+ case STATE_STARTING:
+ sppp_cp_change_state(cp, sp, STATE_INITIAL);
+ (cp->tlf)(sp);
+ break;
+ case STATE_STOPPED:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ break;
+ case STATE_STOPPING:
+ sppp_cp_change_state(cp, sp, STATE_CLOSING);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ /* fall through */
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_terminate;
+ sppp_cp_send(sp, cp->proto, TERM_REQ, ++sp->pp_seq, 0, 0);
+ sppp_cp_change_state(cp, sp, STATE_CLOSING);
+ break;
+ }
+}
+
+static void
+sppp_to_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+ int s;
+
+ s = splimp();
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s TO(%s) rst_counter = %d\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]),
+ sp->rst_counter[cp->protoidx]);
+
+ if (--sp->rst_counter[cp->protoidx] < 0)
+ /* TO- event */
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_STOPPING:
+ sppp_cp_change_state(cp, sp, STATE_STOPPED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sppp_cp_change_state(cp, sp, STATE_STOPPED);
+ (cp->tlf)(sp);
+ break;
+ }
+ else
+ /* TO+ event */
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ sppp_cp_send(sp, cp->proto, TERM_REQ, ++sp->pp_seq,
+ 0, 0);
+ TIMEOUT(cp->TO, (void *)sp, sp->lcp.timeout,
+ sp->ch[cp->protoidx]);
+ break;
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ (cp->scr)(sp);
+ /* sppp_cp_change_state() will restart the timer */
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_ACK_SENT:
+ (cp->scr)(sp);
+ TIMEOUT(cp->TO, (void *)sp, sp->lcp.timeout,
+ sp->ch[cp->protoidx]);
+ break;
+ }
+
+ splx(s);
+}
+
+/*
+ * Change the state of a control protocol in the state automaton.
+ * Takes care of starting/stopping the restart timer.
+ */
+void
+sppp_cp_change_state(const struct cp *cp, struct sppp *sp, int newstate)
+{
+ sp->state[cp->protoidx] = newstate;
+
+ UNTIMEOUT(cp->TO, (void *)sp, sp->ch[cp->protoidx]);
+ switch (newstate) {
+ case STATE_INITIAL:
+ case STATE_STARTING:
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_OPENED:
+ break;
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ TIMEOUT(cp->TO, (void *)sp, sp->lcp.timeout,
+ sp->ch[cp->protoidx]);
+ break;
+ }
+}
+ /*
+ *--------------------------------------------------------------------------*
+ * *
+ * The LCP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+static void
+sppp_lcp_init(struct sppp *sp)
+{
+ sp->lcp.opts = (1 << LCP_OPT_MAGIC);
+ sp->lcp.magic = 0;
+ sp->state[IDX_LCP] = STATE_INITIAL;
+ sp->fail_counter[IDX_LCP] = 0;
+ sp->lcp.protos = 0;
+ sp->lcp.mru = sp->lcp.their_mru = PP_MTU;
+
+ /* Note that these values are relevant for all control protocols */
+ sp->lcp.timeout = 3 * hz;
+ sp->lcp.max_terminate = 2;
+ sp->lcp.max_configure = 10;
+ sp->lcp.max_failure = 10;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ callout_handle_init(&sp->ch[IDX_LCP]);
+#endif
+}
+
+static void
+sppp_lcp_up(struct sppp *sp)
+{
+ STDDCL;
+
+ /*
+ * If this interface is passive or dial-on-demand, and we are
+ * still in Initial state, it means we've got an incoming
+ * call. Activate the interface.
+ */
+ if ((ifp->if_flags & (IFF_AUTO | IFF_PASSIVE)) != 0) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "Up event", SPP_ARGS(ifp));
+ ifp->if_flags |= IFF_RUNNING;
+ if (sp->state[IDX_LCP] == STATE_INITIAL) {
+ if (debug)
+ addlog("(incoming call)\n");
+ sp->pp_flags |= PP_CALLIN;
+ lcp.Open(sp);
+ } else if (debug)
+ addlog("\n");
+ }
+
+ sppp_up_event(&lcp, sp);
+}
+
+static void
+sppp_lcp_down(struct sppp *sp)
+{
+ STDDCL;
+
+ sppp_down_event(&lcp, sp);
+
+ /*
+ * If this is neither a dial-on-demand nor a passive
+ * interface, simulate an ``ifconfig down'' action, so the
+ * administrator can force a redial by another ``ifconfig
+ * up''. XXX For leased line operation, should we immediately
+ * try to reopen the connection here?
+ */
+ if ((ifp->if_flags & (IFF_AUTO | IFF_PASSIVE)) == 0) {
+ log(LOG_INFO,
+ SPP_FMT "Down event, taking interface down.\n",
+ SPP_ARGS(ifp));
+ if_down(ifp);
+ } else {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "Down event (carrier loss)\n",
+ SPP_ARGS(ifp));
+ }
+ sp->pp_flags &= ~PP_CALLIN;
+ if (sp->state[IDX_LCP] != STATE_INITIAL)
+ lcp.Close(sp);
+ ifp->if_flags &= ~IFF_RUNNING;
+}
+
+static void
+sppp_lcp_open(struct sppp *sp)
+{
+ /*
+ * If we are authenticator, negotiate LCP_AUTH
+ */
+ if (sp->hisauth.proto != 0)
+ sp->lcp.opts |= (1 << LCP_OPT_AUTH_PROTO);
+ else
+ sp->lcp.opts &= ~(1 << LCP_OPT_AUTH_PROTO);
+ sp->pp_flags &= ~PP_NEEDAUTH;
+ sppp_open_event(&lcp, sp);
+}
+
+static void
+sppp_lcp_close(struct sppp *sp)
+{
+ sppp_close_event(&lcp, sp);
+}
+
+static void
+sppp_lcp_TO(void *cookie)
+{
+ sppp_to_event(&lcp, (struct sppp *)cookie);
+}
+
+/*
+ * Analyze a configure request. Return true if it was agreeable, and
+ * caused action sca, false if it has been rejected or nak'ed, and
+ * caused action scn. (The return value is used to make the state
+ * transition decision in the state automaton.)
+ */
+static int
+sppp_lcp_RCR(struct sppp *sp, struct lcp_header *h, int len)
+{
+ STDDCL;
+ u_char *buf, *r, *p;
+ int origlen, rlen;
+ u_long nmagic;
+ u_short authproto;
+
+ len -= 4;
+ origlen = len;
+ buf = r = malloc (len, M_TEMP, M_NOWAIT);
+ if (! buf)
+ return (0);
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp parse opts: ",
+ SPP_ARGS(ifp));
+
+ /* pass 1: check for things that need to be rejected */
+ p = (void*) (h+1);
+ for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number. */
+ /* fall through, both are same length */
+ case LCP_OPT_ASYNC_MAP:
+ /* Async control character map. */
+			if (len >= 6 && p[1] == 6)
+ continue;
+ if (debug)
+ addlog("[invalid] ");
+ break;
+ case LCP_OPT_MRU:
+ /* Maximum receive unit. */
+ if (len >= 4 && p[1] == 4)
+ continue;
+ if (debug)
+ addlog("[invalid] ");
+ break;
+ case LCP_OPT_AUTH_PROTO:
+ if (len < 4) {
+ if (debug)
+ addlog("[invalid] ");
+ break;
+ }
+ authproto = (p[2] << 8) + p[3];
+ if (authproto == PPP_CHAP && p[1] != 5) {
+ if (debug)
+ addlog("[invalid chap len] ");
+ break;
+ }
+ if (sp->myauth.proto == 0) {
+ /* we are not configured to do auth */
+ if (debug)
+ addlog("[not configured] ");
+ break;
+ }
+ /*
+			 * The remote wants us to authenticate; remember
+			 * this so we stay in PHASE_AUTHENTICATE after LCP
+			 * has come up.
+ */
+ sp->pp_flags |= PP_NEEDAUTH;
+ continue;
+ default:
+ /* Others not supported. */
+ if (debug)
+ addlog("[rej] ");
+ break;
+ }
+ /* Add the option to rejected list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen) {
+ if (debug)
+ addlog(" send conf-rej\n");
+		sppp_cp_send (sp, PPP_LCP, CONF_REJ, h->ident, rlen, buf);
+		free (buf, M_TEMP);
+		return 0;
+ } else if (debug)
+ addlog("\n");
+
+ /*
+ * pass 2: check for option values that are unacceptable and
+	 * thus need to be nak'ed.
+ */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp parse opt values: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ len = origlen;
+ for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- extract. */
+ nmagic = (u_long)p[2] << 24 |
+ (u_long)p[3] << 16 | p[4] << 8 | p[5];
+ if (nmagic != sp->lcp.magic) {
+ if (debug)
+ addlog("0x%lx ", nmagic);
+ continue;
+ }
+ /*
+ * Local and remote magics equal -- loopback?
+ */
+ if (sp->pp_loopcnt >= MAXALIVECNT*5) {
+ printf (SPP_FMT "loopback\n",
+ SPP_ARGS(ifp));
+ sp->pp_loopcnt = 0;
+ if (ifp->if_flags & IFF_UP) {
+ if_down(ifp);
+ sppp_qflush(&sp->pp_cpq);
+ /* XXX ? */
+ lcp.Down(sp);
+ lcp.Up(sp);
+ }
+ } else if (debug)
+ addlog("[glitch] ");
+ ++sp->pp_loopcnt;
+ /*
+ * We negate our magic here, and NAK it. If
+ * we see it later in an NAK packet, we
+ * suggest a new one.
+ */
+ nmagic = ~sp->lcp.magic;
+ /* Gonna NAK it. */
+ p[2] = nmagic >> 24;
+ p[3] = nmagic >> 16;
+ p[4] = nmagic >> 8;
+ p[5] = nmagic;
+ break;
+
+ case LCP_OPT_ASYNC_MAP:
+ /* Async control character map -- check to be zero. */
+ if (! p[2] && ! p[3] && ! p[4] && ! p[5]) {
+ if (debug)
+ addlog("[empty] ");
+ continue;
+ }
+ if (debug)
+ addlog("[non-empty] ");
+ /* suggest a zero one */
+ p[2] = p[3] = p[4] = p[5] = 0;
+ break;
+
+ case LCP_OPT_MRU:
+ /*
+ * Maximum receive unit. Always agreeable,
+			 * but ignored for now.
+ */
+ sp->lcp.their_mru = p[2] * 256 + p[3];
+ if (debug)
+ addlog("%lu ", sp->lcp.their_mru);
+ continue;
+
+ case LCP_OPT_AUTH_PROTO:
+ authproto = (p[2] << 8) + p[3];
+ if (sp->myauth.proto != authproto) {
+ /* not agreed, nak */
+ if (debug)
+ addlog("[mine %s != his %s] ",
+				       sppp_proto_name(sp->myauth.proto),
+ sppp_proto_name(authproto));
+ p[2] = sp->myauth.proto >> 8;
+ p[3] = sp->myauth.proto;
+ break;
+ }
+ if (authproto == PPP_CHAP && p[4] != CHAP_MD5) {
+ if (debug)
+ addlog("[chap not MD5] ");
+ p[4] = CHAP_MD5;
+ break;
+ }
+ continue;
+ }
+ /* Add the option to nak'ed list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen) {
+ if (++sp->fail_counter[IDX_LCP] >= sp->lcp.max_failure) {
+ if (debug)
+ addlog(" max_failure (%d) exceeded, "
+ "send conf-rej\n",
+ sp->lcp.max_failure);
+ sppp_cp_send(sp, PPP_LCP, CONF_REJ, h->ident, rlen, buf);
+ } else {
+ if (debug)
+ addlog(" send conf-nak\n");
+ sppp_cp_send (sp, PPP_LCP, CONF_NAK, h->ident, rlen, buf);
+ }
+		free (buf, M_TEMP);
+		return 0;
+ } else {
+ if (debug)
+ addlog(" send conf-ack\n");
+ sp->fail_counter[IDX_LCP] = 0;
+ sp->pp_loopcnt = 0;
+ sppp_cp_send (sp, PPP_LCP, CONF_ACK,
+ h->ident, origlen, h+1);
+ }
+
+ free (buf, M_TEMP);
+ return (rlen == 0);
+}
+
+/*
+ * Analyze the LCP Configure-Reject option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_lcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len)
+{
+ STDDCL;
+ u_char *buf, *p;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp rej opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len > 1 && p[1]; len -= p[1], p += p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- can't use it, use 0 */
+ sp->lcp.opts &= ~(1 << LCP_OPT_MAGIC);
+ sp->lcp.magic = 0;
+ break;
+ case LCP_OPT_MRU:
+ /*
+ * Should not be rejected anyway, since we only
+			 * negotiate an MRU if the peer explicitly
+			 * requests one.
+ */
+ sp->lcp.opts &= ~(1 << LCP_OPT_MRU);
+ break;
+ case LCP_OPT_AUTH_PROTO:
+ /*
+			 * The peer doesn't want to authenticate itself;
+			 * deny this, unless it is a dialout call and
+			 * AUTHFLAG_NOCALLOUT is set.
+ */
+ if ((sp->pp_flags & PP_CALLIN) == 0 &&
+ (sp->hisauth.flags & AUTHFLAG_NOCALLOUT) != 0) {
+ if (debug)
+ addlog("[don't insist on auth "
+ "for callout]");
+ sp->lcp.opts &= ~(1 << LCP_OPT_AUTH_PROTO);
+ break;
+ }
+ if (debug)
+ addlog("[access denied]\n");
+ lcp.Close(sp);
+ break;
+ }
+ }
+ if (debug)
+ addlog("\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+/*
+ * Analyze the LCP Configure-NAK option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_lcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len)
+{
+ STDDCL;
+ u_char *buf, *p;
+ u_long magic;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp nak opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len > 1 && p[1]; len -= p[1], p += p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- renegotiate */
+ if ((sp->lcp.opts & (1 << LCP_OPT_MAGIC)) &&
+ len >= 6 && p[1] == 6) {
+ magic = (u_long)p[2] << 24 |
+ (u_long)p[3] << 16 | p[4] << 8 | p[5];
+ /*
+ * If the remote magic is our negated one,
+ * this looks like a loopback problem.
+ * Suggest a new magic to make sure.
+ */
+ if (magic == ~sp->lcp.magic) {
+ if (debug)
+ addlog("magic glitch ");
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ sp->lcp.magic = random();
+#else
+ sp->lcp.magic = time.tv_sec + time.tv_usec;
+#endif
+ } else {
+ sp->lcp.magic = magic;
+ if (debug)
+ addlog("%lu ", magic);
+ }
+ }
+ break;
+ case LCP_OPT_MRU:
+ /*
+ * Peer wants to advise us to negotiate an MRU.
+ * Agree on it if it's reasonable, or use
+ * default otherwise.
+ */
+ if (len >= 4 && p[1] == 4) {
+ u_int mru = p[2] * 256 + p[3];
+ if (debug)
+ addlog("%d ", mru);
+ if (mru < PP_MTU || mru > PP_MAX_MRU)
+ mru = PP_MTU;
+ sp->lcp.mru = mru;
+ sp->lcp.opts |= (1 << LCP_OPT_MRU);
+ }
+ break;
+ case LCP_OPT_AUTH_PROTO:
+ /*
+ * Peer doesn't like our authentication method,
+ * deny.
+ */
+ if (debug)
+ addlog("[access denied]\n");
+ lcp.Close(sp);
+ break;
+ }
+ }
+ if (debug)
+ addlog("\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+static void
+sppp_lcp_tlu(struct sppp *sp)
+{
+ STDDCL;
+ int i;
+ u_long mask;
+
+ /* XXX ? */
+ if (! (ifp->if_flags & IFF_UP) &&
+ (ifp->if_flags & IFF_RUNNING)) {
+ /* Coming out of loopback mode. */
+ if_up(ifp);
+ printf (SPP_FMT "up\n", SPP_ARGS(ifp));
+ }
+
+ for (i = 0; i < IDX_COUNT; i++)
+ if ((cps[i])->flags & CP_QUAL)
+ (cps[i])->Open(sp);
+
+ if ((sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0 ||
+ (sp->pp_flags & PP_NEEDAUTH) != 0)
+ sp->pp_phase = PHASE_AUTHENTICATE;
+ else
+ sp->pp_phase = PHASE_NETWORK;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /*
+ * Open all authentication protocols. This is even required
+ * if we already proceeded to network phase, since it might be
+ * that remote wants us to authenticate, so we might have to
+ * send a PAP request. Undesired authentication protocols
+ * don't do anything when they get an Open event.
+ */
+ for (i = 0; i < IDX_COUNT; i++)
+ if ((cps[i])->flags & CP_AUTH)
+ (cps[i])->Open(sp);
+
+ if (sp->pp_phase == PHASE_NETWORK) {
+ /* Notify all NCPs. */
+ for (i = 0; i < IDX_COUNT; i++)
+ if ((cps[i])->flags & CP_NCP)
+ (cps[i])->Open(sp);
+ }
+
+ /* Send Up events to all started protos. */
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if (sp->lcp.protos & mask && ((cps[i])->flags & CP_LCP) == 0)
+ (cps[i])->Up(sp);
+
+ /* notify low-level driver of state change */
+ if (sp->pp_chg)
+ sp->pp_chg(sp, (int)sp->pp_phase);
+
+ if (sp->pp_phase == PHASE_NETWORK)
+ /* if no NCP is starting, close down */
+ sppp_lcp_check_and_close(sp);
+}
+
+static void
+sppp_lcp_tld(struct sppp *sp)
+{
+ STDDCL;
+ int i;
+ u_long mask;
+
+ sp->pp_phase = PHASE_TERMINATE;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /*
+ * Take upper layers down. We send the Down event first and
+ * the Close second to prevent the upper layers from sending
+ * ``a flurry of terminate-request packets'', as the RFC
+ * describes it.
+ */
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if (sp->lcp.protos & mask && ((cps[i])->flags & CP_LCP) == 0) {
+ (cps[i])->Down(sp);
+ (cps[i])->Close(sp);
+ }
+}
+
+static void
+sppp_lcp_tls(struct sppp *sp)
+{
+ STDDCL;
+
+ sp->pp_phase = PHASE_ESTABLISH;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /* Notify lower layer if desired. */
+ if (sp->pp_tls)
+ (sp->pp_tls)(sp);
+ else
+ (sp->pp_up)(sp);
+}
+
+static void
+sppp_lcp_tlf(struct sppp *sp)
+{
+ STDDCL;
+
+ sp->pp_phase = PHASE_DEAD;
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /* Notify lower layer if desired. */
+ if (sp->pp_tlf)
+ (sp->pp_tlf)(sp);
+ else
+ (sp->pp_down)(sp);
+}
+
+static void
+sppp_lcp_scr(struct sppp *sp)
+{
+ char opt[6 /* magicnum */ + 4 /* mru */ + 5 /* chap */];
+ int i = 0;
+ u_short authproto;
+
+ if (sp->lcp.opts & (1 << LCP_OPT_MAGIC)) {
+ if (! sp->lcp.magic)
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ sp->lcp.magic = random();
+#else
+ sp->lcp.magic = time.tv_sec + time.tv_usec;
+#endif
+ opt[i++] = LCP_OPT_MAGIC;
+ opt[i++] = 6;
+ opt[i++] = sp->lcp.magic >> 24;
+ opt[i++] = sp->lcp.magic >> 16;
+ opt[i++] = sp->lcp.magic >> 8;
+ opt[i++] = sp->lcp.magic;
+ }
+
+ if (sp->lcp.opts & (1 << LCP_OPT_MRU)) {
+ opt[i++] = LCP_OPT_MRU;
+ opt[i++] = 4;
+ opt[i++] = sp->lcp.mru >> 8;
+ opt[i++] = sp->lcp.mru;
+ }
+
+ if (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) {
+ authproto = sp->hisauth.proto;
+ opt[i++] = LCP_OPT_AUTH_PROTO;
+ opt[i++] = authproto == PPP_CHAP? 5: 4;
+ opt[i++] = authproto >> 8;
+ opt[i++] = authproto;
+ if (authproto == PPP_CHAP)
+ opt[i++] = CHAP_MD5;
+ }
+
+ sp->confid[IDX_LCP] = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_LCP, CONF_REQ, sp->confid[IDX_LCP], i, &opt);
+}
+
+/*
+ * Check the open NCPs, return true if at least one NCP is open.
+ */
+static int
+sppp_ncp_check(struct sppp *sp)
+{
+ int i, mask;
+
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if (sp->lcp.protos & mask && (cps[i])->flags & CP_NCP)
+ return 1;
+ return 0;
+}
+
+/*
+ * Re-check the open NCPs and see if we should terminate the link.
+ * Called by the NCPs during their tlf action handling.
+ */
+static void
+sppp_lcp_check_and_close(struct sppp *sp)
+{
+
+ if (sp->pp_phase < PHASE_NETWORK)
+ /* don't bother, we are already going down */
+ return;
+
+ if (sppp_ncp_check(sp))
+ return;
+
+ lcp.Close(sp);
+}
+ /*
+ *--------------------------------------------------------------------------*
+ * *
+ * The IPCP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+
+static void
+sppp_ipcp_init(struct sppp *sp)
+{
+ sp->ipcp.opts = 0;
+ sp->ipcp.flags = 0;
+ sp->state[IDX_IPCP] = STATE_INITIAL;
+ sp->fail_counter[IDX_IPCP] = 0;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ callout_handle_init(&sp->ch[IDX_IPCP]);
+#endif
+}
+
+static void
+sppp_ipcp_up(struct sppp *sp)
+{
+ sppp_up_event(&ipcp, sp);
+}
+
+static void
+sppp_ipcp_down(struct sppp *sp)
+{
+ sppp_down_event(&ipcp, sp);
+}
+
+static void
+sppp_ipcp_open(struct sppp *sp)
+{
+ STDDCL;
+ u_long myaddr, hisaddr;
+
+ sp->ipcp.flags &= ~(IPCP_HISADDR_SEEN|IPCP_MYADDR_SEEN|IPCP_MYADDR_DYN);
+
+ sppp_get_ip_addrs(sp, &myaddr, &hisaddr, 0);
+ /*
+ * If we don't have his address, this probably means our
+ * interface doesn't want to talk IP at all. (This could
+ * be the case if somebody wants to speak only IPX, for
+ * example.) Don't open IPCP in this case.
+ */
+ if (hisaddr == 0L) {
+ /* XXX this message should go away */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp_open(): no IP interface\n",
+ SPP_ARGS(ifp));
+ return;
+ }
+
+ if (myaddr == 0L) {
+ /*
+		 * I don't have an assigned address, so I need to
+ * negotiate my address.
+ */
+ sp->ipcp.flags |= IPCP_MYADDR_DYN;
+ sp->ipcp.opts |= (1 << IPCP_OPT_ADDRESS);
+ } else
+ sp->ipcp.flags |= IPCP_MYADDR_SEEN;
+ sppp_open_event(&ipcp, sp);
+}
+
+static void
+sppp_ipcp_close(struct sppp *sp)
+{
+ sppp_close_event(&ipcp, sp);
+ if (sp->ipcp.flags & IPCP_MYADDR_DYN)
+ /*
+ * My address was dynamic, clear it again.
+ */
+ sppp_set_ip_addr(sp, 0L);
+}
+
+static void
+sppp_ipcp_TO(void *cookie)
+{
+ sppp_to_event(&ipcp, (struct sppp *)cookie);
+}
+
+/*
+ * Analyze a configure request. Return true if it was agreeable, and
+ * caused action sca, false if it has been rejected or nak'ed, and
+ * caused action scn. (The return value is used to make the state
+ * transition decision in the state automaton.)
+ */
+static int
+sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *r, *p;
+ struct ifnet *ifp = &sp->pp_if;
+ int rlen, origlen, debug = ifp->if_flags & IFF_DEBUG;
+ u_long hisaddr, desiredaddr;
+ int gotmyaddr = 0;
+
+ len -= 4;
+ origlen = len;
+ /*
+ * Make sure to allocate a buf that can at least hold a
+ * conf-nak with an `address' option. We might need it below.
+ */
+ buf = r = malloc ((len < 6? 6: len), M_TEMP, M_NOWAIT);
+ if (! buf)
+ return (0);
+
+ /* pass 1: see if we can recognize them */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp parse opts: ",
+ SPP_ARGS(ifp));
+ p = (void*) (h+1);
+ for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_ADDRESS:
+ if (len >= 6 && p[1] == 6) {
+ /* correctly formed address option */
+ continue;
+ }
+ if (debug)
+ addlog("[invalid] ");
+ break;
+ default:
+ /* Others not supported. */
+ if (debug)
+ addlog("[rej] ");
+ break;
+ }
+ /* Add the option to rejected list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen) {
+ if (debug)
+ addlog(" send conf-rej\n");
+		sppp_cp_send (sp, PPP_IPCP, CONF_REJ, h->ident, rlen, buf);
+		free (buf, M_TEMP);
+		return 0;
+ } else if (debug)
+ addlog("\n");
+
+ /* pass 2: parse option values */
+ sppp_get_ip_addrs(sp, 0, &hisaddr, 0);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp parse opt values: ",
+ SPP_ARGS(ifp));
+ p = (void*) (h+1);
+ len = origlen;
+ for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_ADDRESS:
+ /* This is the address he wants in his end */
+ desiredaddr = p[2] << 24 | p[3] << 16 |
+ p[4] << 8 | p[5];
+ if (desiredaddr == hisaddr ||
+ (hisaddr == 1 && desiredaddr != 0)) {
+ /*
+				 * The peer's address matches our idea
+				 * of it, or we have set that to 0.0.0.1
+				 * to indicate that we do not really
+				 * care; either way this is agreeable,
+				 * so conf-ack it.
+ */
+ if (debug)
+ addlog("%s [ack] ",
+ sppp_dotted_quad(hisaddr));
+ /* record that we've seen it already */
+ sp->ipcp.flags |= IPCP_HISADDR_SEEN;
+ continue;
+ }
+ /*
+			 * The address wasn't agreeable.  Either he sent
+			 * us 0.0.0.0, asking us to assign him an
+			 * address, or he sent us an address that does
+			 * not match our value.  In either case we
+			 * conf-nak it with our value.
+ * XXX: we should "rej" if hisaddr == 0
+ */
+ if (debug) {
+ if (desiredaddr == 0)
+ addlog("[addr requested] ");
+ else
+ addlog("%s [not agreed] ",
+ sppp_dotted_quad(desiredaddr));
+
+ }
+ p[2] = hisaddr >> 24;
+ p[3] = hisaddr >> 16;
+ p[4] = hisaddr >> 8;
+ p[5] = hisaddr;
+ break;
+ }
+ /* Add the option to nak'ed list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+
+ /*
+	 * If we are about to conf-ack the request but haven't seen
+	 * his address so far, conf-nak it instead, with the
+	 * `address' option present and our idea of his address
+	 * filled in there, to request negotiation of both addresses.
+ *
+ * XXX This can result in an endless req - nak loop if peer
+ * doesn't want to send us his address. Q: What should we do
+ * about it? XXX A: implement the max-failure counter.
+ */
+ if (rlen == 0 && !(sp->ipcp.flags & IPCP_HISADDR_SEEN) && !gotmyaddr) {
+ buf[0] = IPCP_OPT_ADDRESS;
+ buf[1] = 6;
+ buf[2] = hisaddr >> 24;
+ buf[3] = hisaddr >> 16;
+ buf[4] = hisaddr >> 8;
+ buf[5] = hisaddr;
+ rlen = 6;
+ if (debug)
+ addlog("still need hisaddr ");
+ }
+
+ if (rlen) {
+ if (debug)
+ addlog(" send conf-nak\n");
+ sppp_cp_send (sp, PPP_IPCP, CONF_NAK, h->ident, rlen, buf);
+ } else {
+ if (debug)
+ addlog(" send conf-ack\n");
+ sppp_cp_send (sp, PPP_IPCP, CONF_ACK,
+ h->ident, origlen, h+1);
+ }
+
+ free (buf, M_TEMP);
+ return (rlen == 0);
+}
+
+/*
+ * Analyze the IPCP Configure-Reject option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_ipcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *p;
+ struct ifnet *ifp = &sp->pp_if;
+ int debug = ifp->if_flags & IFF_DEBUG;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp rej opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len > 1 && p[1]; len -= p[1], p += p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_ADDRESS:
+ /*
+ * Peer doesn't grok address option. This is
+			 * bad.  XXX  Should we just give up here?
+ * XXX We could try old "addresses" option...
+ */
+ sp->ipcp.opts &= ~(1 << IPCP_OPT_ADDRESS);
+ break;
+ }
+ }
+ if (debug)
+ addlog("\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+/*
+ * Analyze the IPCP Configure-NAK option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_ipcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *p;
+ struct ifnet *ifp = &sp->pp_if;
+ int debug = ifp->if_flags & IFF_DEBUG;
+ u_long wantaddr;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp nak opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len > 1 && p[1]; len -= p[1], p += p[1]) {
+ if (debug)
+ addlog(" %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_ADDRESS:
+ /*
+			 * The peer doesn't like our local IP address
+			 * and has suggested one of his own.  See if
+			 * we can use it.
+ */
+ if (len >= 6 && p[1] == 6) {
+ wantaddr = p[2] << 24 | p[3] << 16 |
+ p[4] << 8 | p[5];
+ sp->ipcp.opts |= (1 << IPCP_OPT_ADDRESS);
+ if (debug)
+ addlog("[wantaddr %s] ",
+ sppp_dotted_quad(wantaddr));
+ /*
+ * When doing dynamic address assignment,
+ * we accept his offer. Otherwise, we
+ * ignore it and thus continue to negotiate
+ * our already existing value.
+ * XXX: Bogus, if he said no once, he'll
+ * just say no again, might as well die.
+ */
+ if (sp->ipcp.flags & IPCP_MYADDR_DYN) {
+ sppp_set_ip_addr(sp, wantaddr);
+ if (debug)
+ addlog("[agree] ");
+ sp->ipcp.flags |= IPCP_MYADDR_SEEN;
+ }
+ }
+ break;
+ }
+ }
+ if (debug)
+ addlog("\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+static void
+sppp_ipcp_tlu(struct sppp *sp)
+{
+ /* we are up - notify isdn daemon */
+ if (sp->pp_con)
+ sp->pp_con(sp);
+}
+
+static void
+sppp_ipcp_tld(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_tls(struct sppp *sp)
+{
+ /* indicate to LCP that it must stay alive */
+ sp->lcp.protos |= (1 << IDX_IPCP);
+}
+
+static void
+sppp_ipcp_tlf(struct sppp *sp)
+{
+ /* we no longer need LCP */
+ sp->lcp.protos &= ~(1 << IDX_IPCP);
+ sppp_lcp_check_and_close(sp);
+}
+
+static void
+sppp_ipcp_scr(struct sppp *sp)
+{
+ char opt[6 /* compression */ + 6 /* address */];
+ u_long ouraddr;
+ int i = 0;
+
+ if (sp->ipcp.opts & (1 << IPCP_OPT_ADDRESS)) {
+ sppp_get_ip_addrs(sp, &ouraddr, 0, 0);
+ opt[i++] = IPCP_OPT_ADDRESS;
+ opt[i++] = 6;
+ opt[i++] = ouraddr >> 24;
+ opt[i++] = ouraddr >> 16;
+ opt[i++] = ouraddr >> 8;
+ opt[i++] = ouraddr;
+ }
+
+ sp->confid[IDX_IPCP] = ++sp->pp_seq;
+ sppp_cp_send(sp, PPP_IPCP, CONF_REQ, sp->confid[IDX_IPCP], i, &opt);
+}
+
+
+ /*
+ *--------------------------------------------------------------------------*
+ * *
+ * The CHAP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+
+/*
+ * The authentication protocols don't employ a full-fledged state machine as
+ * the control protocols do, since they do have Open and Close events, but
+ * not Up and Down, nor are they explicitly terminated. Also, use of the
+ * authentication protocols may be different in both directions (this makes
+ * sense, think of a machine that never accepts incoming calls but only
+ * calls out, it doesn't require the called party to authenticate itself).
+ *
+ * Our state machine for the local authentication protocol (we are requesting
+ * the peer to authenticate) looks like:
+ *
+ * RCA-
+ * +--------------------------------------------+
+ * V scn,tld|
+ * +--------+ Close +---------+ RCA+
+ * | |<----------------------------------| |------+
+ * +--->| Closed | TO* | Opened | sca |
+ * | | |-----+ +-------| |<-----+
+ * | +--------+ irc | | +---------+
+ * | ^ | | ^
+ * | | | | |
+ * | | | | |
+ * | TO-| | | |
+ * | |tld TO+ V | |
+ * | | +------->+ | |
+ * | | | | | |
+ * | +--------+ V | |
+ * | | |<----+<--------------------+ |
+ * | | Req- | scr |
+ * | | Sent | |
+ * | | | |
+ * | +--------+ |
+ * | RCA- | | RCA+ |
+ * +------+ +------------------------------------------+
+ * scn,tld sca,irc,ict,tlu
+ *
+ *
+ * with:
+ *
+ * Open: LCP reached authentication phase
+ * Close: LCP reached terminate phase
+ *
+ * RCA+: received reply (pap-req, chap-response), acceptable
+ * RCA-: received reply (pap-req, chap-response), not acceptable
+ * TO+: timeout with restart counter >= 0
+ * TO-: timeout with restart counter < 0
+ * TO*: reschedule timeout for CHAP
+ *
+ * scr: send request packet (none for PAP, chap-challenge)
+ * sca: send ack packet (pap-ack, chap-success)
+ * scn: send nak packet (pap-nak, chap-failure)
+ * ict: initialize re-challenge timer (CHAP only)
+ *
+ * tlu: this-layer-up, LCP reaches network phase
+ * tld: this-layer-down, LCP enters terminate phase
+ *
+ * Note that in CHAP mode, after sending a new challenge, while the state
+ * automaton falls back into Req-Sent state, it doesn't signal a tld
+ * event to LCP, so LCP remains in network phase. Only after not getting
+ * any response (or after getting an unacceptable response), CHAP closes,
+ * causing LCP to enter terminate phase.
+ *
+ * With PAP, there is no initial request that can be sent. The peer is
+ * expected to send one based on the successful negotiation of PAP as
+ * the authentication protocol during the LCP option negotiation.
+ *
+ * Incoming authentication protocol requests (remote requests
+ * authentication, we are peer) don't employ a state machine at all,
+ * they are simply answered. Some peers [Ascend P50 firmware rev
+ * 4.50] react allergically if we send IPCP requests while they are
+ * still in authentication phase (thereby violating the standard that
+ * demands that these NCP packets are to be discarded), so we keep
+ * track of the peer demanding us to authenticate, and only proceed to
+ * phase network once we've seen a positive acknowledge for the
+ * authentication.
+ */
+
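+/*
+ * For reading convenience only, the authenticator diagram above can be
+ * summarised as a transition table.  This is an illustrative sketch
+ * (the struct and array names are made up for the example); the code
+ * below dispatches through switch statements in sppp_chap_input(),
+ * sppp_chap_TO() and friends rather than through a table.
+ */
+#if 0
+static const struct {
+	int		state;		/* current state */
+	const char	*event;		/* mnemonic from the diagram above */
+	int		next;		/* resulting state */
+	const char	*actions;
+} auth_fsm_sketch[] = {
+	{ STATE_CLOSED,   "Open",  STATE_REQ_SENT, "scr,irc" },
+	{ STATE_REQ_SENT, "RCA+",  STATE_OPENED,   "sca,irc,ict,tlu" },
+	{ STATE_REQ_SENT, "RCA-",  STATE_CLOSED,   "scn,tld" },
+	{ STATE_REQ_SENT, "TO+",   STATE_REQ_SENT, "scr" },
+	{ STATE_REQ_SENT, "TO-",   STATE_CLOSED,   "tld" },
+	{ STATE_OPENED,   "RCA+",  STATE_OPENED,   "sca" },
+	{ STATE_OPENED,   "RCA-",  STATE_CLOSED,   "scn,tld" },
+	{ STATE_OPENED,   "TO*",   STATE_REQ_SENT, "scr" },
+	{ STATE_OPENED,   "Close", STATE_CLOSED,   "" },
+};
+#endif
+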
+/*
+ * Handle incoming CHAP packets.
+ */
+void
+sppp_chap_input(struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct lcp_header *h;
+ int len, x;
+ u_char *value, *name, digest[AUTHKEYLEN], dsize;
+ int value_len, name_len;
+ MD5_CTX ctx;
+
+ len = m->m_pkthdr.len;
+ if (len < 4) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "chap invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ return;
+ }
+ h = mtod (m, struct lcp_header*);
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+
+ switch (h->type) {
+ /* challenge, failure and success are his authproto */
+ case CHAP_CHALLENGE:
+ value = 1 + (u_char*)(h+1);
+ value_len = value[-1];
+ name = value + value_len;
+ name_len = len - value_len - 5;
+ if (name_len < 0) {
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap corrupted challenge "
+ "<%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_CHAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*) (h+1), len-4);
+ addlog(">\n");
+ }
+ break;
+ }
+
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap input <%s id=0x%x len=%d name=",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_CHAP, h->type), h->ident,
+ ntohs(h->len));
+ sppp_print_string((char*) name, name_len);
+ addlog(" value-size=%d value=", value_len);
+ sppp_print_bytes(value, value_len);
+ addlog(">\n");
+ }
+
+ /* Compute reply value. */
+ MD5Init(&ctx);
+ MD5Update(&ctx, &h->ident, 1);
+ MD5Update(&ctx, sp->myauth.secret,
+ sppp_strnlen(sp->myauth.secret, AUTHKEYLEN));
+ MD5Update(&ctx, value, value_len);
+ MD5Final(digest, &ctx);
+ dsize = sizeof digest;
+
+ sppp_auth_send(&chap, sp, CHAP_RESPONSE, h->ident,
+ sizeof dsize, (const char *)&dsize,
+ sizeof digest, digest,
+ (size_t)sppp_strnlen(sp->myauth.name, AUTHNAMELEN),
+ sp->myauth.name,
+ 0);
+ break;
+
+ case CHAP_SUCCESS:
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "chap success",
+ SPP_ARGS(ifp));
+ if (len > 4) {
+ addlog(": ");
+ sppp_print_string((char*)(h + 1), len - 4);
+ }
+ addlog("\n");
+ }
+ x = splimp();
+ sp->pp_flags &= ~PP_NEEDAUTH;
+ if (sp->myauth.proto == PPP_CHAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) &&
+ (sp->lcp.protos & (1 << IDX_CHAP)) == 0) {
+ /*
+ * We are authenticator for CHAP but didn't
+ * complete yet. Leave it to tlu to proceed
+ * to network phase.
+ */
+ splx(x);
+ break;
+ }
+ splx(x);
+ sppp_phase_network(sp);
+ break;
+
+ case CHAP_FAILURE:
+ if (debug) {
+ log(LOG_INFO, SPP_FMT "chap failure",
+ SPP_ARGS(ifp));
+ if (len > 4) {
+ addlog(": ");
+ sppp_print_string((char*)(h + 1), len - 4);
+ }
+ addlog("\n");
+ } else
+ log(LOG_INFO, SPP_FMT "chap failure\n",
+ SPP_ARGS(ifp));
+ /* await LCP shutdown by authenticator */
+ break;
+
+ /* response is my authproto */
+ case CHAP_RESPONSE:
+ value = 1 + (u_char*)(h+1);
+ value_len = value[-1];
+ name = value + value_len;
+ name_len = len - value_len - 5;
+ if (name_len < 0) {
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap corrupted response "
+ "<%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_CHAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ addlog(">\n");
+ }
+ break;
+ }
+ if (h->ident != sp->confid[IDX_CHAP]) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "chap dropping response for old ID "
+ "(got %d, expected %d)\n",
+ SPP_ARGS(ifp),
+ h->ident, sp->confid[IDX_CHAP]);
+ break;
+ }
+ if (name_len != sppp_strnlen(sp->hisauth.name, AUTHNAMELEN)
+ || bcmp(name, sp->hisauth.name, name_len) != 0) {
+ log(LOG_INFO, SPP_FMT "chap response, his name ",
+ SPP_ARGS(ifp));
+ sppp_print_string(name, name_len);
+ addlog(" != expected ");
+ sppp_print_string(sp->hisauth.name,
+ sppp_strnlen(sp->hisauth.name, AUTHNAMELEN));
+ addlog("\n");
+ }
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "chap input(%s) "
+ "<%s id=0x%x len=%d name=",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_CHAP]),
+ sppp_auth_type_name(PPP_CHAP, h->type),
+ h->ident, ntohs (h->len));
+ sppp_print_string((char*)name, name_len);
+ addlog(" value-size=%d value=", value_len);
+ sppp_print_bytes(value, value_len);
+ addlog(">\n");
+ }
+ if (value_len != AUTHKEYLEN) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "chap bad hash value length: "
+ "%d bytes, should be %d\n",
+ SPP_ARGS(ifp), value_len,
+ AUTHKEYLEN);
+ break;
+ }
+
+ MD5Init(&ctx);
+ MD5Update(&ctx, &h->ident, 1);
+ MD5Update(&ctx, sp->hisauth.secret,
+ sppp_strnlen(sp->hisauth.secret, AUTHKEYLEN));
+ MD5Update(&ctx, sp->myauth.challenge, AUTHKEYLEN);
+ MD5Final(digest, &ctx);
+
+#define FAILMSG "Failed..."
+#define SUCCMSG "Welcome!"
+
+ if (value_len != sizeof digest ||
+ bcmp(digest, value, value_len) != 0) {
+ /* action scn, tld */
+ sppp_auth_send(&chap, sp, CHAP_FAILURE, h->ident,
+ sizeof(FAILMSG) - 1, (u_char *)FAILMSG,
+ 0);
+ chap.tld(sp);
+ break;
+ }
+ /* action sca, perhaps tlu */
+ if (sp->state[IDX_CHAP] == STATE_REQ_SENT ||
+ sp->state[IDX_CHAP] == STATE_OPENED)
+ sppp_auth_send(&chap, sp, CHAP_SUCCESS, h->ident,
+ sizeof(SUCCMSG) - 1, (u_char *)SUCCMSG,
+ 0);
+ if (sp->state[IDX_CHAP] == STATE_REQ_SENT) {
+ sppp_cp_change_state(&chap, sp, STATE_OPENED);
+ chap.tlu(sp);
+ }
+ break;
+
+ default:
+ /* Unknown CHAP packet type -- ignore. */
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "chap unknown input(%s) "
+ "<0x%x id=0x%xh len=%d",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_CHAP]),
+ h->type, h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ addlog(">\n");
+ }
+ break;
+
+ }
+}
+
+static void
+sppp_chap_init(struct sppp *sp)
+{
+ /* Chap doesn't have STATE_INITIAL at all. */
+ sp->state[IDX_CHAP] = STATE_CLOSED;
+ sp->fail_counter[IDX_CHAP] = 0;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ callout_handle_init(&sp->ch[IDX_CHAP]);
+#endif
+}
+
+static void
+sppp_chap_open(struct sppp *sp)
+{
+ if (sp->myauth.proto == PPP_CHAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0) {
+ /* we are authenticator for CHAP, start it */
+ chap.scr(sp);
+ sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure;
+ sppp_cp_change_state(&chap, sp, STATE_REQ_SENT);
+ }
+ /* nothing to be done if we are peer, await a challenge */
+}
+
+static void
+sppp_chap_close(struct sppp *sp)
+{
+ if (sp->state[IDX_CHAP] != STATE_CLOSED)
+ sppp_cp_change_state(&chap, sp, STATE_CLOSED);
+}
+
+static void
+sppp_chap_TO(void *cookie)
+{
+ struct sppp *sp = (struct sppp *)cookie;
+ STDDCL;
+ int s;
+
+ s = splimp();
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "chap TO(%s) rst_counter = %d\n",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_CHAP]),
+ sp->rst_counter[IDX_CHAP]);
+
+ if (--sp->rst_counter[IDX_CHAP] < 0)
+ /* TO- event */
+ switch (sp->state[IDX_CHAP]) {
+ case STATE_REQ_SENT:
+ chap.tld(sp);
+ sppp_cp_change_state(&chap, sp, STATE_CLOSED);
+ break;
+ }
+ else
+ /* TO+ (or TO*) event */
+ switch (sp->state[IDX_CHAP]) {
+ case STATE_OPENED:
+ /* TO* event */
+ sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure;
+ /* fall through */
+ case STATE_REQ_SENT:
+ chap.scr(sp);
+ /* sppp_cp_change_state() will restart the timer */
+ sppp_cp_change_state(&chap, sp, STATE_REQ_SENT);
+ break;
+ }
+
+ splx(s);
+}
+
+static void
+sppp_chap_tlu(struct sppp *sp)
+{
+ STDDCL;
+ int i, x;
+
+ i = 0;
+ sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure;
+
+ /*
+ * Some broken CHAP implementations (Conware CoNet, firmware
+ * 4.0.?) don't want to re-authenticate their CHAP once the
+ * initial challenge-response exchange has taken place.
+ * Provide for an option to avoid rechallenges.
+ */
+ if ((sp->hisauth.flags & AUTHFLAG_NORECHALLENGE) == 0) {
+ /*
+ * Compute the re-challenge timeout. This will yield
+ * a number between 300 and 810 seconds.
+ */
+ i = 300 + ((unsigned)(random() & 0xff00) >> 7);
+ TIMEOUT(chap.TO, (void *)sp, i * hz, sp->ch[IDX_CHAP]);
+ }
+
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap %s, ",
+ SPP_ARGS(ifp),
+ sp->pp_phase == PHASE_NETWORK? "reconfirmed": "tlu");
+ if ((sp->hisauth.flags & AUTHFLAG_NORECHALLENGE) == 0)
+ addlog("next re-challenge in %d seconds\n", i);
+ else
+			addlog("re-challenging suppressed\n");
+ }
+
+ x = splimp();
+ /* indicate to LCP that we need to be closed down */
+ sp->lcp.protos |= (1 << IDX_CHAP);
+
+ if (sp->pp_flags & PP_NEEDAUTH) {
+ /*
+ * Remote is authenticator, but his auth proto didn't
+ * complete yet. Defer the transition to network
+ * phase.
+ */
+ splx(x);
+ return;
+ }
+ splx(x);
+
+ /*
+ * If we are already in phase network, we are done here. This
+ * is the case if this is a dummy tlu event after a re-challenge.
+ */
+ if (sp->pp_phase != PHASE_NETWORK)
+ sppp_phase_network(sp);
+}
+
+static void
+sppp_chap_tld(struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "chap tld\n", SPP_ARGS(ifp));
+ UNTIMEOUT(chap.TO, (void *)sp, sp->ch[IDX_CHAP]);
+ sp->lcp.protos &= ~(1 << IDX_CHAP);
+
+ lcp.Close(sp);
+}
+
+static void
+sppp_chap_scr(struct sppp *sp)
+{
+ u_long *ch, seed;
+ u_char clen;
+
+ /* Compute random challenge. */
+ ch = (u_long *)sp->myauth.challenge;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ read_random(&seed, sizeof seed);
+#else
+ {
+ struct timeval tv;
+ microtime(&tv);
+ seed = tv.tv_sec ^ tv.tv_usec;
+ }
+#endif
+ ch[0] = seed ^ random();
+ ch[1] = seed ^ random();
+ ch[2] = seed ^ random();
+ ch[3] = seed ^ random();
+ clen = AUTHKEYLEN;
+
+ sp->confid[IDX_CHAP] = ++sp->pp_seq;
+
+ sppp_auth_send(&chap, sp, CHAP_CHALLENGE, sp->confid[IDX_CHAP],
+ sizeof clen, (const char *)&clen,
+ (size_t)AUTHKEYLEN, sp->myauth.challenge,
+ (size_t)sppp_strnlen(sp->myauth.name, AUTHNAMELEN),
+ sp->myauth.name,
+ 0);
+}
+ /*
+ *--------------------------------------------------------------------------*
+ * *
+ * The PAP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+/*
+ * For PAP, we need to keep a little state also if we are the peer, not the
+ * authenticator.  This is because we don't get a request to authenticate but
+ * have to repeatedly authenticate ourselves until we get a response (or the
+ * retry counter expires).
+ */
+
+/*
+ * Handle incoming PAP packets.
+ */
+static void
+sppp_pap_input(struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct lcp_header *h;
+ int len, x;
+ u_char *name, *passwd, mlen;
+ int name_len, passwd_len;
+
+ len = m->m_pkthdr.len;
+ if (len < 5) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "pap invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ return;
+ }
+ h = mtod (m, struct lcp_header*);
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ switch (h->type) {
+ /* PAP request is my authproto */
+ case PAP_REQ:
+ name = 1 + (u_char*)(h+1);
+ name_len = name[-1];
+ passwd = name + name_len + 1;
+ if (name_len > len - 6 ||
+ (passwd_len = passwd[-1]) > len - 6 - name_len) {
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "pap corrupted input "
+ "<%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_PAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ addlog(">\n");
+ }
+ break;
+ }
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "pap input(%s) "
+ "<%s id=0x%x len=%d name=",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_PAP]),
+ sppp_auth_type_name(PPP_PAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_string((char*)name, name_len);
+ addlog(" passwd=");
+ sppp_print_string((char*)passwd, passwd_len);
+ addlog(">\n");
+ }
+ if (name_len > AUTHNAMELEN ||
+ passwd_len > AUTHKEYLEN ||
+ bcmp(name, sp->hisauth.name, name_len) != 0 ||
+ bcmp(passwd, sp->hisauth.secret, passwd_len) != 0) {
+ /* action scn, tld */
+ mlen = sizeof(FAILMSG) - 1;
+ sppp_auth_send(&pap, sp, PAP_NAK, h->ident,
+ sizeof mlen, (const char *)&mlen,
+ sizeof(FAILMSG) - 1, (u_char *)FAILMSG,
+ 0);
+ pap.tld(sp);
+ break;
+ }
+ /* action sca, perhaps tlu */
+ if (sp->state[IDX_PAP] == STATE_REQ_SENT ||
+ sp->state[IDX_PAP] == STATE_OPENED) {
+ mlen = sizeof(SUCCMSG) - 1;
+ sppp_auth_send(&pap, sp, PAP_ACK, h->ident,
+ sizeof mlen, (const char *)&mlen,
+ sizeof(SUCCMSG) - 1, (u_char *)SUCCMSG,
+ 0);
+ }
+ if (sp->state[IDX_PAP] == STATE_REQ_SENT) {
+ sppp_cp_change_state(&pap, sp, STATE_OPENED);
+ pap.tlu(sp);
+ }
+ break;
+
+ /* ack and nak are his authproto */
+ case PAP_ACK:
+ UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch);
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "pap success",
+ SPP_ARGS(ifp));
+			name_len = *((char *)(h + 1));
+			if (len > 5 && name_len) {
+				addlog(": ");
+				sppp_print_string((char *)(h + 1) + 1, name_len);
+ }
+ addlog("\n");
+ }
+ x = splimp();
+ sp->pp_flags &= ~PP_NEEDAUTH;
+ if (sp->myauth.proto == PPP_PAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) &&
+ (sp->lcp.protos & (1 << IDX_PAP)) == 0) {
+ /*
+ * We are authenticator for PAP but didn't
+ * complete yet. Leave it to tlu to proceed
+ * to network phase.
+ */
+ splx(x);
+ break;
+ }
+ splx(x);
+ sppp_phase_network(sp);
+ break;
+
+ case PAP_NAK:
+ UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch);
+ if (debug) {
+ log(LOG_INFO, SPP_FMT "pap failure",
+ SPP_ARGS(ifp));
+			name_len = *((char *)(h + 1));
+			if (len > 5 && name_len) {
+				addlog(": ");
+				sppp_print_string((char *)(h + 1) + 1, name_len);
+ }
+ addlog("\n");
+ } else
+ log(LOG_INFO, SPP_FMT "pap failure\n",
+ SPP_ARGS(ifp));
+ /* await LCP shutdown by authenticator */
+ break;
+
+ default:
+ /* Unknown PAP packet type -- ignore. */
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "pap corrupted input "
+ "<0x%x id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ h->type, h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ addlog(">\n");
+ }
+ break;
+
+ }
+}
+
+static void
+sppp_pap_init(struct sppp *sp)
+{
+ /* PAP doesn't have STATE_INITIAL at all. */
+ sp->state[IDX_PAP] = STATE_CLOSED;
+ sp->fail_counter[IDX_PAP] = 0;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ callout_handle_init(&sp->ch[IDX_PAP]);
+ callout_handle_init(&sp->pap_my_to_ch);
+#endif
+}
+
+static void
+sppp_pap_open(struct sppp *sp)
+{
+ if (sp->hisauth.proto == PPP_PAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0) {
+ /* we are authenticator for PAP, start our timer */
+ sp->rst_counter[IDX_PAP] = sp->lcp.max_configure;
+ sppp_cp_change_state(&pap, sp, STATE_REQ_SENT);
+ }
+ if (sp->myauth.proto == PPP_PAP) {
+ /* we are peer, send a request, and start a timer */
+ pap.scr(sp);
+ TIMEOUT(sppp_pap_my_TO, (void *)sp, sp->lcp.timeout,
+ sp->pap_my_to_ch);
+ }
+}
+
+static void
+sppp_pap_close(struct sppp *sp)
+{
+ if (sp->state[IDX_PAP] != STATE_CLOSED)
+ sppp_cp_change_state(&pap, sp, STATE_CLOSED);
+}
+
+/*
+ * That's the timeout routine if we are authenticator. Since the
+ * authenticator is basically passive in PAP, we can't do much here.
+ */
+static void
+sppp_pap_TO(void *cookie)
+{
+ struct sppp *sp = (struct sppp *)cookie;
+ STDDCL;
+ int s;
+
+ s = splimp();
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "pap TO(%s) rst_counter = %d\n",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_PAP]),
+ sp->rst_counter[IDX_PAP]);
+
+ if (--sp->rst_counter[IDX_PAP] < 0)
+ /* TO- event */
+ switch (sp->state[IDX_PAP]) {
+ case STATE_REQ_SENT:
+ pap.tld(sp);
+ sppp_cp_change_state(&pap, sp, STATE_CLOSED);
+ break;
+ }
+ else
+ /* TO+ event, not very much we could do */
+ switch (sp->state[IDX_PAP]) {
+ case STATE_REQ_SENT:
+ /* sppp_cp_change_state() will restart the timer */
+ sppp_cp_change_state(&pap, sp, STATE_REQ_SENT);
+ break;
+ }
+
+ splx(s);
+}
+
+/*
+ * That's the timeout handler if we are peer. Since the peer is active,
+ * we need to retransmit our PAP request, as it has apparently been lost.
+ * XXX We should impose a max counter.
+ */
+static void
+sppp_pap_my_TO(void *cookie)
+{
+ struct sppp *sp = (struct sppp *)cookie;
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "pap peer TO\n",
+ SPP_ARGS(ifp));
+
+ pap.scr(sp);
+}
+
+static void
+sppp_pap_tlu(struct sppp *sp)
+{
+ STDDCL;
+ int x;
+
+ sp->rst_counter[IDX_PAP] = sp->lcp.max_configure;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s tlu\n",
+ SPP_ARGS(ifp), pap.name);
+
+ x = splimp();
+ /* indicate to LCP that we need to be closed down */
+ sp->lcp.protos |= (1 << IDX_PAP);
+
+ if (sp->pp_flags & PP_NEEDAUTH) {
+ /*
+ * Remote is authenticator, but his auth proto didn't
+ * complete yet. Defer the transition to network
+ * phase.
+ */
+ splx(x);
+ return;
+ }
+ splx(x);
+ sppp_phase_network(sp);
+}
+
+static void
+sppp_pap_tld(struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "pap tld\n", SPP_ARGS(ifp));
+ UNTIMEOUT(pap.TO, (void *)sp, sp->ch[IDX_PAP]);
+ UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch);
+ sp->lcp.protos &= ~(1 << IDX_PAP);
+
+ lcp.Close(sp);
+}
+
+static void
+sppp_pap_scr(struct sppp *sp)
+{
+ u_char idlen, pwdlen;
+
+ sp->confid[IDX_PAP] = ++sp->pp_seq;
+ pwdlen = sppp_strnlen(sp->myauth.secret, AUTHKEYLEN);
+ idlen = sppp_strnlen(sp->myauth.name, AUTHNAMELEN);
+
+ sppp_auth_send(&pap, sp, PAP_REQ, sp->confid[IDX_PAP],
+ sizeof idlen, (const char *)&idlen,
+ (size_t)idlen, sp->myauth.name,
+ sizeof pwdlen, (const char *)&pwdlen,
+ (size_t)pwdlen, sp->myauth.secret,
+ 0);
+}
+ /*
+ * Random miscellaneous functions.
+ */
+
+/*
+ * Send a PAP or CHAP proto packet.
+ *
+ * Variadic function, each of the elements for the ellipsis is of type
+ * ``size_t mlen, const u_char *msg''.  Processing will stop iff
+ * mlen == 0.  (A short usage sketch follows the function body below.)
+ * NOTE: never declare variadic functions with types subject to type
+ * promotion (i.e. u_char). This is asking for big trouble depending
+ * on the architecture you are on...
+ */
+
+static void
+sppp_auth_send(const struct cp *cp, struct sppp *sp,
+ unsigned int type, unsigned int id,
+ ...)
+{
+ STDDCL;
+ struct ppp_header *h;
+ struct lcp_header *lh;
+ struct mbuf *m;
+ u_char *p;
+ int len;
+ unsigned int mlen;
+ const char *msg;
+ va_list ap;
+
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, struct ppp_header*);
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ h->protocol = htons(cp->proto);
+
+ lh = (struct lcp_header*)(h + 1);
+ lh->type = type;
+ lh->ident = id;
+ p = (u_char*) (lh+1);
+
+ va_start(ap, id);
+ len = 0;
+
+ while ((mlen = (unsigned int)va_arg(ap, size_t)) != 0) {
+ msg = va_arg(ap, const char *);
+ len += mlen;
+ if (len > MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN) {
+ va_end(ap);
+ m_freem(m);
+ return;
+ }
+
+ bcopy(msg, p, mlen);
+ p += mlen;
+ }
+ va_end(ap);
+
+ m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + LCP_HEADER_LEN + len;
+ lh->len = htons (LCP_HEADER_LEN + len);
+
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "%s output <%s id=0x%x len=%d",
+ SPP_ARGS(ifp), cp->name,
+ sppp_auth_type_name(cp->proto, lh->type),
+ lh->ident, ntohs(lh->len));
+ sppp_print_bytes((u_char*) (lh+1), len);
+ addlog(">\n");
+ }
+ if (IF_QFULL (&sp->pp_cpq)) {
+ IF_DROP (&sp->pp_fastq);
+ IF_DROP (&ifp->if_snd);
+ m_freem (m);
+ ++ifp->if_oerrors;
+ } else
+ IF_ENQUEUE (&sp->pp_cpq, m);
+ if (! (ifp->if_flags & IFF_OACTIVE))
+ (*ifp->if_start) (ifp);
+	/* m may already be freed or enqueued here, so don't dereference it */
+	ifp->if_obytes += PPP_HEADER_LEN + LCP_HEADER_LEN + len + 3;
+}
+
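+/*
+ * Usage sketch for the calling convention above, modelled on the
+ * existing callers (see sppp_pap_input()).  Every chunk is passed as a
+ * ``size_t length, pointer'' pair and a zero length terminates the
+ * list; casting the terminator to size_t keeps its width unambiguous.
+ */
+#if 0
+	u_char mlen = sizeof(SUCCMSG) - 1;
+
+	sppp_auth_send(&pap, sp, PAP_ACK, h->ident,
+	    sizeof mlen, (const char *)&mlen,	/* message-length octet */
+	    (size_t)mlen, (u_char *)SUCCMSG,	/* the message itself */
+	    (size_t)0);				/* end of argument list */
+#endif
+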
+/*
+ * Flush interface queue.
+ */
+static void
+sppp_qflush(struct ifqueue *ifq)
+{
+ struct mbuf *m, *n;
+
+ n = ifq->ifq_head;
+ while ((m = n)) {
+ n = m->m_act;
+ m_freem (m);
+ }
+ ifq->ifq_head = 0;
+ ifq->ifq_tail = 0;
+ ifq->ifq_len = 0;
+}
+
+/*
+ * Send keepalive packets, every 10 seconds.
+ */
+static void
+sppp_keepalive(void *dummy)
+{
+ struct sppp *sp;
+ int s;
+
+ s = splimp();
+ for (sp=spppq; sp; sp=sp->pp_next) {
+ struct ifnet *ifp = &sp->pp_if;
+
+ /* Keepalive mode disabled or channel down? */
+ if (! (sp->pp_flags & PP_KEEPALIVE) ||
+ ! (ifp->if_flags & IFF_RUNNING))
+ continue;
+
+ /* No keepalive in PPP mode if LCP not opened yet. */
+ if (sp->pp_mode != IFF_CISCO &&
+ sp->pp_phase < PHASE_AUTHENTICATE)
+ continue;
+
+ if (sp->pp_alivecnt == MAXALIVECNT) {
+			/* No keepalive replies received.  Stop the interface. */
+ printf (SPP_FMT "down\n", SPP_ARGS(ifp));
+ if_down (ifp);
+ sppp_qflush (&sp->pp_cpq);
+ if (sp->pp_mode != IFF_CISCO) {
+ /* XXX */
+ /* Shut down the PPP link. */
+ lcp.Down(sp);
+ /* Initiate negotiation. XXX */
+ lcp.Up(sp);
+ }
+ }
+ if (sp->pp_alivecnt <= MAXALIVECNT)
+ ++sp->pp_alivecnt;
+ if (sp->pp_mode == IFF_CISCO)
+ sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
+ sp->pp_rseq);
+ else if (sp->pp_phase >= PHASE_AUTHENTICATE) {
+ long nmagic = htonl (sp->lcp.magic);
+ sp->lcp.echoid = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_LCP, ECHO_REQ,
+ sp->lcp.echoid, 4, &nmagic);
+ }
+ }
+ splx(s);
+ TIMEOUT(sppp_keepalive, 0, hz * 10, keepalive_ch);
+}
+
+/*
+ * Get both IP addresses.
+ */
+static void
+sppp_get_ip_addrs(struct sppp *sp, u_long *src, u_long *dst, u_long *srcmask)
+{
+ struct ifnet *ifp = &sp->pp_if;
+ struct ifaddr *ifa;
+ struct sockaddr_in *si, *sm;
+ u_long ssrc, ddst;
+
+ sm = NULL;
+ ssrc = ddst = 0L;
+ /*
+ * Pick the first AF_INET address from the list,
+ * aliases don't make any sense on a p2p link anyway.
+ */
+ si = 0;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+#elif defined(__NetBSD__) || defined (__OpenBSD__)
+ for (ifa = ifp->if_addrlist.tqh_first;
+ ifa;
+ ifa = ifa->ifa_list.tqe_next)
+#else
+ for (ifa = ifp->if_addrlist;
+ ifa;
+ ifa = ifa->ifa_next)
+#endif
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ si = (struct sockaddr_in *)ifa->ifa_addr;
+ sm = (struct sockaddr_in *)ifa->ifa_netmask;
+ if (si)
+ break;
+ }
+ if (ifa) {
+ if (si && si->sin_addr.s_addr) {
+ ssrc = si->sin_addr.s_addr;
+ if (srcmask)
+ *srcmask = ntohl(sm->sin_addr.s_addr);
+ }
+
+ si = (struct sockaddr_in *)ifa->ifa_dstaddr;
+ if (si && si->sin_addr.s_addr)
+ ddst = si->sin_addr.s_addr;
+ }
+
+ if (dst) *dst = ntohl(ddst);
+ if (src) *src = ntohl(ssrc);
+}
+
+/*
+ * Set my IP address. Must be called at splimp.
+ */
+static void
+sppp_set_ip_addr(struct sppp *sp, u_long src)
+{
+ STDDCL;
+ struct ifaddr *ifa;
+ struct sockaddr_in *si;
+
+ /*
+ * Pick the first AF_INET address from the list,
+ * aliases don't make any sense on a p2p link anyway.
+ */
+ si = 0;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+#elif defined(__NetBSD__) || defined (__OpenBSD__)
+ for (ifa = ifp->if_addrlist.tqh_first;
+ ifa;
+ ifa = ifa->ifa_list.tqe_next)
+#else
+ for (ifa = ifp->if_addrlist;
+ ifa;
+ ifa = ifa->ifa_next)
+#endif
+ {
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ {
+ si = (struct sockaddr_in *)ifa->ifa_addr;
+ if (si)
+ break;
+ }
+ }
+
+ if (ifa && si)
+ {
+ int error;
+#if __NetBSD_Version__ >= 103080000
+ struct sockaddr_in new_sin = *si;
+
+ new_sin.sin_addr.s_addr = htonl(src);
+ error = in_ifinit(ifp, ifatoia(ifa), &new_sin, 1);
+ if(debug && error)
+ {
+ log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: in_ifinit "
+ " failed, error=%d\n", SPP_ARGS(ifp), error);
+ }
+#else
+ /* delete old route */
+ error = rtinit(ifa, (int)RTM_DELETE, RTF_HOST);
+ if(debug && error)
+ {
+ log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: rtinit DEL failed, error=%d\n",
+ SPP_ARGS(ifp), error);
+ }
+
+ /* set new address */
+ si->sin_addr.s_addr = htonl(src);
+
+ /* add new route */
+ error = rtinit(ifa, (int)RTM_ADD, RTF_HOST);
+ if (debug && error)
+ {
+ log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: rtinit ADD failed, error=%d",
+ SPP_ARGS(ifp), error);
+ }
+#endif
+ }
+}
+
+static int
+sppp_params(struct sppp *sp, u_long cmd, void *data)
+{
+ u_long subcmd;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct spppreq spr;
+
+ /*
+ * ifr->ifr_data is supposed to point to a struct spppreq.
+ * Check the cmd word first before attempting to fetch all the
+ * data.
+ */
+ if ((subcmd = fuword(ifr->ifr_data)) == -1)
+ return EFAULT;
+
+ if (copyin((caddr_t)ifr->ifr_data, &spr, sizeof spr) != 0)
+ return EFAULT;
+
+ switch (subcmd) {
+ case SPPPIOGDEFS:
+ if (cmd != SIOCGIFGENERIC)
+ return EINVAL;
+ /*
+ * We copy over the entire current state, but clean
+ * out some of the stuff we don't wanna pass up.
+ * Remember, SIOCGIFGENERIC is unprotected, and can be
+ * called by any user. No need to ever get PAP or
+ * CHAP secrets back to userland anyway.
+ */
+ bcopy(sp, &spr.defs, sizeof(struct sppp));
+ bzero(spr.defs.myauth.secret, AUTHKEYLEN);
+ bzero(spr.defs.myauth.challenge, AUTHKEYLEN);
+ bzero(spr.defs.hisauth.secret, AUTHKEYLEN);
+ bzero(spr.defs.hisauth.challenge, AUTHKEYLEN);
+ return copyout(&spr, (caddr_t)ifr->ifr_data, sizeof spr);
+
+ case SPPPIOSDEFS:
+ if (cmd != SIOCSIFGENERIC)
+ return EINVAL;
+ /*
+ * We have a very specific idea of which fields we allow
+		 * to be passed back from userland, so as not to clobber our
+ * current state. For one, we only allow setting
+ * anything if LCP is in dead phase. Once the LCP
+ * negotiations started, the authentication settings must
+ * not be changed again. (The administrator can force an
+ * ifconfig down in order to get LCP back into dead
+ * phase.)
+ *
+ * Also, we only allow for authentication parameters to be
+ * specified.
+ *
+ * XXX Should allow to set or clear pp_flags.
+ *
+ * Finally, if the respective authentication protocol to
+		 * be used is set to something other than 0, but the secret is
+ * passed as all zeros, we don't trash the existing secret.
+ * This allows an administrator to change the system name
+ * only without clobbering the secret (which he didn't get
+ * back in a previous SPPPIOGDEFS call). However, the
+ * secrets are cleared if the authentication protocol is
+ * reset to 0.
+ */
+ if (sp->pp_phase != PHASE_DEAD)
+ return EBUSY;
+
+ if ((spr.defs.myauth.proto != 0 && spr.defs.myauth.proto != PPP_PAP &&
+ spr.defs.myauth.proto != PPP_CHAP) ||
+ (spr.defs.hisauth.proto != 0 && spr.defs.hisauth.proto != PPP_PAP &&
+ spr.defs.hisauth.proto != PPP_CHAP))
+ return EINVAL;
+
+ if (spr.defs.myauth.proto == 0)
+ /* resetting myauth */
+ bzero(&sp->myauth, sizeof sp->myauth);
+ else {
+ /* setting/changing myauth */
+ sp->myauth.proto = spr.defs.myauth.proto;
+ bcopy(spr.defs.myauth.name, sp->myauth.name, AUTHNAMELEN);
+ if (spr.defs.myauth.secret[0] != '\0')
+ bcopy(spr.defs.myauth.secret, sp->myauth.secret,
+ AUTHKEYLEN);
+ }
+ if (spr.defs.hisauth.proto == 0)
+ /* resetting hisauth */
+ bzero(&sp->hisauth, sizeof sp->hisauth);
+ else {
+ /* setting/changing hisauth */
+ sp->hisauth.proto = spr.defs.hisauth.proto;
+ sp->hisauth.flags = spr.defs.hisauth.flags;
+ bcopy(spr.defs.hisauth.name, sp->hisauth.name, AUTHNAMELEN);
+ if (spr.defs.hisauth.secret[0] != '\0')
+ bcopy(spr.defs.hisauth.secret, sp->hisauth.secret,
+ AUTHKEYLEN);
+ }
+ break;
+
+ default:
+ return EINVAL;
+ }
+
+ return 0;
+}
+
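+/*
+ * Userland usage sketch for the SPPPIOGDEFS/SPPPIOSDEFS handling above
+ * (illustrative only; the in-tree consumer is spppcontrol(8)).  The
+ * interface name, the socket descriptor and the spppreq ``cmd'' member
+ * name are assumptions of this example.  The request travels through
+ * ifr->ifr_data; the kernel fetches it back with fuword()/copyin() as
+ * shown above.
+ */
+#if 0
+	struct ifreq ifr;
+	struct spppreq spr;
+
+	bzero(&ifr, sizeof ifr);
+	strncpy(ifr.ifr_name, "isp0", sizeof ifr.ifr_name);
+	spr.cmd = SPPPIOGDEFS;
+	ifr.ifr_data = (caddr_t)&spr;
+	if (ioctl(s, SIOCGIFGENERIC, (caddr_t)&ifr) == 0) {
+		/* spr.defs now holds a copy of the sppp state, secrets zeroed */
+	}
+#endif
+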
+static void
+sppp_phase_network(struct sppp *sp)
+{
+ STDDCL;
+ int i;
+ u_long mask;
+
+ sp->pp_phase = PHASE_NETWORK;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /* Notify NCPs now. */
+ for (i = 0; i < IDX_COUNT; i++)
+ if ((cps[i])->flags & CP_NCP)
+ (cps[i])->Open(sp);
+
+ /* Send Up events to all NCPs. */
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if (sp->lcp.protos & mask && ((cps[i])->flags & CP_NCP))
+ (cps[i])->Up(sp);
+
+ /* if no NCP is starting, all this was in vain, close down */
+ sppp_lcp_check_and_close(sp);
+}
+
+
+static const char *
+sppp_cp_type_name(u_char type)
+{
+ static char buf[12];
+ switch (type) {
+ case CONF_REQ: return "conf-req";
+ case CONF_ACK: return "conf-ack";
+ case CONF_NAK: return "conf-nak";
+ case CONF_REJ: return "conf-rej";
+ case TERM_REQ: return "term-req";
+ case TERM_ACK: return "term-ack";
+ case CODE_REJ: return "code-rej";
+ case PROTO_REJ: return "proto-rej";
+ case ECHO_REQ: return "echo-req";
+ case ECHO_REPLY: return "echo-reply";
+ case DISC_REQ: return "discard-req";
+ }
+ snprintf (buf, sizeof(buf), "cp/0x%x", type);
+ return buf;
+}
+
+static const char *
+sppp_auth_type_name(u_short proto, u_char type)
+{
+ static char buf[12];
+ switch (proto) {
+ case PPP_CHAP:
+ switch (type) {
+ case CHAP_CHALLENGE: return "challenge";
+ case CHAP_RESPONSE: return "response";
+ case CHAP_SUCCESS: return "success";
+ case CHAP_FAILURE: return "failure";
+		}
+		break;
+	case PPP_PAP:
+ switch (type) {
+ case PAP_REQ: return "req";
+ case PAP_ACK: return "ack";
+ case PAP_NAK: return "nak";
+ }
+ }
+ snprintf (buf, sizeof(buf), "auth/0x%x", type);
+ return buf;
+}
+
+static const char *
+sppp_lcp_opt_name(u_char opt)
+{
+ static char buf[12];
+ switch (opt) {
+ case LCP_OPT_MRU: return "mru";
+ case LCP_OPT_ASYNC_MAP: return "async-map";
+ case LCP_OPT_AUTH_PROTO: return "auth-proto";
+ case LCP_OPT_QUAL_PROTO: return "qual-proto";
+ case LCP_OPT_MAGIC: return "magic";
+ case LCP_OPT_PROTO_COMP: return "proto-comp";
+ case LCP_OPT_ADDR_COMP: return "addr-comp";
+ }
+ snprintf (buf, sizeof(buf), "lcp/0x%x", opt);
+ return buf;
+}
+
+static const char *
+sppp_ipcp_opt_name(u_char opt)
+{
+ static char buf[12];
+ switch (opt) {
+ case IPCP_OPT_ADDRESSES: return "addresses";
+ case IPCP_OPT_COMPRESSION: return "compression";
+ case IPCP_OPT_ADDRESS: return "address";
+ }
+ snprintf (buf, sizeof(buf), "ipcp/0x%x", opt);
+ return buf;
+}
+
+static const char *
+sppp_state_name(int state)
+{
+ switch (state) {
+ case STATE_INITIAL: return "initial";
+ case STATE_STARTING: return "starting";
+ case STATE_CLOSED: return "closed";
+ case STATE_STOPPED: return "stopped";
+ case STATE_CLOSING: return "closing";
+ case STATE_STOPPING: return "stopping";
+ case STATE_REQ_SENT: return "req-sent";
+ case STATE_ACK_RCVD: return "ack-rcvd";
+ case STATE_ACK_SENT: return "ack-sent";
+ case STATE_OPENED: return "opened";
+ }
+ return "illegal";
+}
+
+static const char *
+sppp_phase_name(enum ppp_phase phase)
+{
+ switch (phase) {
+ case PHASE_DEAD: return "dead";
+ case PHASE_ESTABLISH: return "establish";
+ case PHASE_TERMINATE: return "terminate";
+ case PHASE_AUTHENTICATE: return "authenticate";
+ case PHASE_NETWORK: return "network";
+ }
+ return "illegal";
+}
+
+static const char *
+sppp_proto_name(u_short proto)
+{
+ static char buf[12];
+ switch (proto) {
+ case PPP_LCP: return "lcp";
+ case PPP_IPCP: return "ipcp";
+ case PPP_PAP: return "pap";
+ case PPP_CHAP: return "chap";
+ }
+ snprintf(buf, sizeof(buf), "proto/0x%x", (unsigned)proto);
+ return buf;
+}
+
+static void
+sppp_print_bytes(const u_char *p, u_short len)
+{
+ if (len)
+ addlog(" %*D", len, p, "-");
+}
+
+static void
+sppp_print_string(const char *p, u_short len)
+{
+ u_char c;
+
+ while (len-- > 0) {
+ c = *p++;
+ /*
+ * Print only ASCII chars directly. RFC 1994 recommends
+ * using only them, but we don't rely on it. */
+ if (c < ' ' || c > '~')
+ addlog("\\x%x", c);
+ else
+ addlog("%c", c);
+ }
+}
+
+static const char *
+sppp_dotted_quad(u_long addr)
+{
+ static char s[16];
+ sprintf(s, "%d.%d.%d.%d",
+ (int)((addr >> 24) & 0xff),
+ (int)((addr >> 16) & 0xff),
+ (int)((addr >> 8) & 0xff),
+ (int)(addr & 0xff));
+ return s;
+}
+
+static int
+sppp_strnlen(u_char *p, int max)
+{
+ int len;
+
+ for (len = 0; len < max && *p; ++p)
+ ++len;
+ return len;
+}
+
+/* a dummy, used to drop uninteresting events */
+static void
+sppp_null(struct sppp *unused)
+{
+ /* do just nothing */
+}
diff --git a/sys/net/if_tun.c b/sys/net/if_tun.c
new file mode 100644
index 0000000..ff60749
--- /dev/null
+++ b/sys/net/if_tun.c
@@ -0,0 +1,690 @@
+/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */
+
+/*
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ *
+ * This source may be freely distributed, however I would be interested
+ * in any changes that are made.
+ *
+ * This driver takes packets off the IP i/f and hands them up to a
+ * user process to have its wicked way with. This driver has its
+ * roots in a similar driver written by Phil Cockcroft (formerly) at
+ * UCL. This driver is based much more on read/write/poll mode of
+ * operation though.
+ *
+ * $FreeBSD$
+ */
+
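+/*
+ * Rough userland usage sketch (illustrative only, not part of the
+ * driver): a process opens the character device, the interface is
+ * configured up, and each subsequent read()/write() moves one raw IP
+ * packet.  The device path and the addresses are example values.
+ */
+#if 0
+	char pkt[TUNMTU];
+	int fd, n;
+
+	fd = open("/dev/tun0", O_RDWR);	/* attaches to (or creates) tun0 */
+	/* ... ifconfig tun0 10.0.0.1 10.0.0.2 up ... */
+	n = read(fd, pkt, sizeof pkt);	/* one packet routed out via tun0 */
+	write(fd, pkt, (size_t)n);	/* hand one packet back to the stack */
+#endif
+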
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/filio.h>
+#include <sys/sockio.h>
+#include <sys/ttycom.h>
+#include <sys/poll.h>
+#include <sys/signalvar.h>
+#include <sys/filedesc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/vnode.h>
+#include <sys/malloc.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+#endif
+
+#include <net/bpf.h>
+
+#include <net/if_tunvar.h>
+#include <net/if_tun.h>
+
+static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface");
+
+static void tunattach __P((void *));
+PSEUDO_SET(tunattach, if_tun);
+
+static void tuncreate __P((dev_t dev));
+
+#define TUNDEBUG if (tundebug) printf
+static int tundebug = 0;
+SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
+
+static int tunoutput __P((struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct rtentry *rt));
+static int tunifioctl __P((struct ifnet *, u_long, caddr_t));
+static int tuninit __P((struct ifnet *));
+
+static d_open_t tunopen;
+static d_close_t tunclose;
+static d_read_t tunread;
+static d_write_t tunwrite;
+static d_ioctl_t tunioctl;
+static d_poll_t tunpoll;
+
+#define CDEV_MAJOR 52
+static struct cdevsw tun_cdevsw = {
+ /* open */ tunopen,
+ /* close */ tunclose,
+ /* read */ tunread,
+ /* write */ tunwrite,
+ /* ioctl */ tunioctl,
+ /* poll */ tunpoll,
+ /* mmap */ nommap,
+ /* strategy */ nostrategy,
+ /* name */ "tun",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ 0,
+ /* bmaj */ -1
+};
+
+static void
+tunattach(dummy)
+ void *dummy;
+{
+
+ cdevsw_add(&tun_cdevsw);
+}
+
+static void
+tuncreate(dev)
+ dev_t dev;
+{
+ struct tun_softc *sc;
+ struct ifnet *ifp;
+
+ dev = make_dev(&tun_cdevsw, minor(dev),
+ UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));
+
+ MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK);
+ bzero(sc, sizeof *sc);
+ sc->tun_flags = TUN_INITED;
+
+ ifp = &sc->tun_if;
+ ifp->if_unit = lminor(dev);
+ ifp->if_name = "tun";
+ ifp->if_mtu = TUNMTU;
+ ifp->if_ioctl = tunifioctl;
+ ifp->if_output = tunoutput;
+ ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_softc = sc;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int));
+ dev->si_drv1 = sc;
+}
+
+/*
+ * tunnel open - must be superuser & the device must be
+ * configured in
+ */
+static int
+tunopen(dev, flag, mode, p)
+ dev_t dev;
+ int flag, mode;
+ struct proc *p;
+{
+ struct ifnet *ifp;
+ struct tun_softc *tp;
+ register int error;
+
+ error = suser(p);
+ if (error)
+ return (error);
+
+ tp = dev->si_drv1;
+ if (!tp) {
+ tuncreate(dev);
+ tp = dev->si_drv1;
+ }
+ if (tp->tun_flags & TUN_OPEN)
+ return EBUSY;
+ tp->tun_pid = p->p_pid;
+ ifp = &tp->tun_if;
+ tp->tun_flags |= TUN_OPEN;
+ TUNDEBUG("%s%d: open\n", ifp->if_name, ifp->if_unit);
+ return (0);
+}
+
+/*
+ * tunclose - close the device - mark i/f down & delete
+ * routing info
+ */
+static int
+tunclose(dev, foo, bar, p)
+ dev_t dev;
+ int foo;
+ int bar;
+ struct proc *p;
+{
+ register int s;
+ struct tun_softc *tp;
+ struct ifnet *ifp;
+ struct mbuf *m;
+
+ tp = dev->si_drv1;
+ ifp = &tp->tun_if;
+
+ tp->tun_flags &= ~TUN_OPEN;
+ tp->tun_pid = 0;
+
+ /*
+ * junk all pending output
+ */
+ do {
+ s = splimp();
+ IF_DEQUEUE(&ifp->if_snd, m);
+ splx(s);
+ if (m)
+ m_freem(m);
+ } while (m);
+
+ if (ifp->if_flags & IFF_UP) {
+ s = splimp();
+ if_down(ifp);
+ splx(s);
+ }
+
+ if (ifp->if_flags & IFF_RUNNING) {
+ register struct ifaddr *ifa;
+
+ s = splimp();
+ /* find internet addresses and delete routes */
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next)
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ rtinit(ifa, (int)RTM_DELETE,
+ tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
+ ifp->if_flags &= ~IFF_RUNNING;
+ splx(s);
+ }
+
+ funsetown(tp->tun_sigio);
+ selwakeup(&tp->tun_rsel);
+
+ TUNDEBUG ("%s%d: closed\n", ifp->if_name, ifp->if_unit);
+ return (0);
+}
+
+static int
+tuninit(ifp)
+ struct ifnet *ifp;
+{
+ struct tun_softc *tp = ifp->if_softc;
+ register struct ifaddr *ifa;
+
+ TUNDEBUG("%s%d: tuninit\n", ifp->if_name, ifp->if_unit);
+
+ ifp->if_flags |= IFF_UP | IFF_RUNNING;
+ getmicrotime(&ifp->if_lastchange);
+
+ for (ifa = ifp->if_addrhead.tqh_first; ifa;
+ ifa = ifa->ifa_link.tqe_next) {
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ struct sockaddr_in *si;
+
+ si = (struct sockaddr_in *)ifa->ifa_addr;
+ if (si && si->sin_addr.s_addr)
+ tp->tun_flags |= TUN_IASET;
+
+ si = (struct sockaddr_in *)ifa->ifa_dstaddr;
+ if (si && si->sin_addr.s_addr)
+ tp->tun_flags |= TUN_DSTADDR;
+ }
+#endif
+ }
+ return 0;
+}
+
+/*
+ * Process an ioctl request.
+ */
+int
+tunifioctl(ifp, cmd, data)
+ struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct tun_softc *tp = ifp->if_softc;
+ struct ifstat *ifs;
+ int error = 0, s;
+
+ s = splimp();
+ switch(cmd) {
+ case SIOCGIFSTATUS:
+ ifs = (struct ifstat *)data;
+ if (tp->tun_pid)
+ sprintf(ifs->ascii + strlen(ifs->ascii),
+ "\tOpened by PID %d\n", tp->tun_pid);
+ return(0);
+ case SIOCSIFADDR:
+ tuninit(ifp);
+ TUNDEBUG("%s%d: address set\n",
+ ifp->if_name, ifp->if_unit);
+ break;
+ case SIOCSIFDSTADDR:
+ tuninit(ifp);
+ TUNDEBUG("%s%d: destination address set\n",
+ ifp->if_name, ifp->if_unit);
+ break;
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ TUNDEBUG("%s%d: mtu set\n",
+ ifp->if_name, ifp->if_unit);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+
+ default:
+ error = EINVAL;
+ }
+ splx(s);
+ return (error);
+}
+
+/*
+ * tunoutput - queue packets from higher level ready to put out.
+ */
+int
+tunoutput(ifp, m0, dst, rt)
+ struct ifnet *ifp;
+ struct mbuf *m0;
+ struct sockaddr *dst;
+ struct rtentry *rt;
+{
+ struct tun_softc *tp = ifp->if_softc;
+ int s;
+
+ TUNDEBUG ("%s%d: tunoutput\n", ifp->if_name, ifp->if_unit);
+
+ if ((tp->tun_flags & TUN_READY) != TUN_READY) {
+ TUNDEBUG ("%s%d: not ready 0%o\n", ifp->if_name,
+ ifp->if_unit, tp->tun_flags);
+ m_freem (m0);
+ return EHOSTDOWN;
+ }
+
+ /* BPF write needs to be handled specially */
+ if (dst->sa_family == AF_UNSPEC) {
+ dst->sa_family = *(mtod(m0, int *));
+ m0->m_len -= sizeof(int);
+ m0->m_pkthdr.len -= sizeof(int);
+ m0->m_data += sizeof(int);
+ }
+
+ if (ifp->if_bpf) {
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+ * try to free it or keep a pointer to it).
+ */
+ struct mbuf m;
+ u_int af = dst->sa_family;
+
+ m.m_next = m0;
+ m.m_len = 4;
+ m.m_data = (char *)&af;
+
+ bpf_mtap(ifp, &m);
+ }
+
+	/* optionally prepend the sockaddr; the packet is dropped if the mbuf allocation fails */
+ if (tp->tun_flags & TUN_LMODE) {
+ /* allocate space for sockaddr */
+ M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
+
+ /* if allocation failed drop packet */
+ if (m0 == NULL){
+ s = splimp(); /* spl on queue manipulation */
+ IF_DROP(&ifp->if_snd);
+ splx(s);
+ ifp->if_oerrors++;
+ return (ENOBUFS);
+ } else {
+ bcopy(dst, m0->m_data, dst->sa_len);
+ }
+ }
+
+ switch(dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ s = splimp();
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ m_freem(m0);
+ splx(s);
+ ifp->if_collisions++;
+ return (ENOBUFS);
+ }
+ ifp->if_obytes += m0->m_pkthdr.len;
+ IF_ENQUEUE(&ifp->if_snd, m0);
+ splx(s);
+ ifp->if_opackets++;
+ break;
+#endif
+ default:
+ m_freem(m0);
+ return EAFNOSUPPORT;
+ }
+
+ if (tp->tun_flags & TUN_RWAIT) {
+ tp->tun_flags &= ~TUN_RWAIT;
+ wakeup((caddr_t)tp);
+ }
+ if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
+ pgsigio(tp->tun_sigio, SIGIO, 0);
+ selwakeup(&tp->tun_rsel);
+ return 0;
+}
+
+/*
+ * the cdevsw interface is now pretty minimal.
+ */
+static int
+tunioctl(dev, cmd, data, flag, p)
+ dev_t dev;
+ u_long cmd;
+ caddr_t data;
+ int flag;
+ struct proc *p;
+{
+ int s;
+ struct tun_softc *tp = dev->si_drv1;
+ struct tuninfo *tunp;
+
+ switch (cmd) {
+ case TUNSIFINFO:
+ tunp = (struct tuninfo *)data;
+ if (tunp->mtu < IF_MINMTU)
+ return (EINVAL);
+ tp->tun_if.if_mtu = tunp->mtu;
+ tp->tun_if.if_type = tunp->type;
+ tp->tun_if.if_baudrate = tunp->baudrate;
+ break;
+ case TUNGIFINFO:
+ tunp = (struct tuninfo *)data;
+ tunp->mtu = tp->tun_if.if_mtu;
+ tunp->type = tp->tun_if.if_type;
+ tunp->baudrate = tp->tun_if.if_baudrate;
+ break;
+ case TUNSDEBUG:
+ tundebug = *(int *)data;
+ break;
+ case TUNGDEBUG:
+ *(int *)data = tundebug;
+ break;
+ case TUNSLMODE:
+ if (*(int *)data)
+ tp->tun_flags |= TUN_LMODE;
+ else
+ tp->tun_flags &= ~TUN_LMODE;
+ break;
+ case TUNSIFMODE:
+ /* deny this if UP */
+ if (tp->tun_if.if_flags & IFF_UP)
+ return(EBUSY);
+
+ switch (*(int *)data) {
+ case IFF_POINTOPOINT:
+ tp->tun_if.if_flags |= IFF_POINTOPOINT;
+ tp->tun_if.if_flags &= ~IFF_BROADCAST;
+ break;
+ case IFF_BROADCAST:
+ tp->tun_if.if_flags &= ~IFF_POINTOPOINT;
+ tp->tun_if.if_flags |= IFF_BROADCAST;
+ break;
+ default:
+ return(EINVAL);
+ }
+ break;
+ case FIONBIO:
+ break;
+ case FIOASYNC:
+ if (*(int *)data)
+ tp->tun_flags |= TUN_ASYNC;
+ else
+ tp->tun_flags &= ~TUN_ASYNC;
+ break;
+ case FIONREAD:
+ s = splimp();
+ if (tp->tun_if.if_snd.ifq_head) {
+ struct mbuf *mb = tp->tun_if.if_snd.ifq_head;
+ for( *(int *)data = 0; mb != 0; mb = mb->m_next)
+ *(int *)data += mb->m_len;
+ } else
+ *(int *)data = 0;
+ splx(s);
+ break;
+ case FIOSETOWN:
+ return (fsetown(*(int *)data, &tp->tun_sigio));
+
+ case FIOGETOWN:
+ *(int *)data = fgetown(tp->tun_sigio);
+ return (0);
+
+ /* This is deprecated, FIOSETOWN should be used instead. */
+ case TIOCSPGRP:
+ return (fsetown(-(*(int *)data), &tp->tun_sigio));
+
+ /* This is deprecated, FIOGETOWN should be used instead. */
+ case TIOCGPGRP:
+ *(int *)data = -fgetown(tp->tun_sigio);
+ return (0);
+
+ default:
+ return (ENOTTY);
+ }
+ return (0);
+}
+
+/*
+ * The cdevsw read interface - reads a packet at a time, or at
+ * least as much of a packet as can be read.
+ */
+static int
+tunread(dev, uio, flag)
+ dev_t dev;
+ struct uio *uio;
+ int flag;
+{
+ struct tun_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = &tp->tun_if;
+ struct mbuf *m, *m0;
+ int error=0, len, s;
+
+ TUNDEBUG ("%s%d: read\n", ifp->if_name, ifp->if_unit);
+ if ((tp->tun_flags & TUN_READY) != TUN_READY) {
+ TUNDEBUG ("%s%d: not ready 0%o\n", ifp->if_name,
+ ifp->if_unit, tp->tun_flags);
+ return EHOSTDOWN;
+ }
+
+ tp->tun_flags &= ~TUN_RWAIT;
+
+ s = splimp();
+ do {
+ IF_DEQUEUE(&ifp->if_snd, m0);
+ if (m0 == 0) {
+ if (flag & IO_NDELAY) {
+ splx(s);
+ return EWOULDBLOCK;
+ }
+ tp->tun_flags |= TUN_RWAIT;
+ if((error = tsleep((caddr_t)tp, PCATCH | (PZERO + 1),
+ "tunread", 0)) != 0) {
+ splx(s);
+ return error;
+ }
+ }
+ } while (m0 == 0);
+ splx(s);
+
+ while (m0 && uio->uio_resid > 0 && error == 0) {
+ len = min(uio->uio_resid, m0->m_len);
+ if (len == 0)
+ break;
+ error = uiomove(mtod(m0, caddr_t), len, uio);
+ MFREE(m0, m);
+ m0 = m;
+ }
+
+ if (m0) {
+ TUNDEBUG("Dropping mbuf\n");
+ m_freem(m0);
+ }
+ return error;
+}
+
+/*
+ * the cdevsw write interface - an atomic write is a packet - or else!
+ */
+static int
+tunwrite(dev, uio, flag)
+ dev_t dev;
+ struct uio *uio;
+ int flag;
+{
+ struct tun_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = &tp->tun_if;
+ struct mbuf *top, **mp, *m;
+ int error=0, s, tlen, mlen;
+
+ TUNDEBUG("%s%d: tunwrite\n", ifp->if_name, ifp->if_unit);
+
+ if (uio->uio_resid == 0)
+ return 0;
+
+ if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
+ TUNDEBUG("%s%d: len=%d!\n", ifp->if_name, ifp->if_unit,
+ uio->uio_resid);
+ return EIO;
+ }
+ tlen = uio->uio_resid;
+
+ /* get a header mbuf */
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return ENOBUFS;
+ mlen = MHLEN;
+
+ top = 0;
+ mp = &top;
+ while (error == 0 && uio->uio_resid > 0) {
+ m->m_len = min(mlen, uio->uio_resid);
+ error = uiomove(mtod (m, caddr_t), m->m_len, uio);
+ *mp = m;
+ mp = &m->m_next;
+ if (uio->uio_resid > 0) {
+ MGET (m, M_DONTWAIT, MT_DATA);
+ if (m == 0) {
+ error = ENOBUFS;
+ break;
+ }
+ mlen = MLEN;
+ }
+ }
+ if (error) {
+ if (top)
+ m_freem (top);
+ return error;
+ }
+
+ top->m_pkthdr.len = tlen;
+ top->m_pkthdr.rcvif = ifp;
+
+ if (ifp->if_bpf) {
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+ * try to free it or keep a pointer to it).
+ */
+ struct mbuf m;
+ u_int af = AF_INET;
+
+ m.m_next = top;
+ m.m_len = 4;
+ m.m_data = (char *)&af;
+
+ bpf_mtap(ifp, &m);
+ }
+
+#ifdef INET
+ s = splimp();
+ if (IF_QFULL (&ipintrq)) {
+ IF_DROP(&ipintrq);
+ splx(s);
+ ifp->if_collisions++;
+ m_freem(top);
+ return ENOBUFS;
+ }
+ IF_ENQUEUE(&ipintrq, top);
+ splx(s);
+ ifp->if_ibytes += tlen;
+ ifp->if_ipackets++;
+ schednetisr(NETISR_IP);
+#endif
+ return error;
+}
+
+/*
+ * tunpoll - the poll interface; this is really only useful for reads.
+ * The write detect always returns true: a write never blocks anyway,
+ * it either accepts the packet or drops it.
+ */
+static int
+tunpoll(dev, events, p)
+ dev_t dev;
+ int events;
+ struct proc *p;
+{
+ int s;
+ struct tun_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = &tp->tun_if;
+ int revents = 0;
+
+ s = splimp();
+ TUNDEBUG("%s%d: tunpoll\n", ifp->if_name, ifp->if_unit);
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (ifp->if_snd.ifq_len > 0) {
+ TUNDEBUG("%s%d: tunpoll q=%d\n", ifp->if_name,
+ ifp->if_unit, ifp->if_snd.ifq_len);
+ revents |= events & (POLLIN | POLLRDNORM);
+ } else {
+ TUNDEBUG("%s%d: tunpoll waiting\n", ifp->if_name,
+ ifp->if_unit);
+ selrecord(p, &tp->tun_rsel);
+ }
+ }
+ if (events & (POLLOUT | POLLWRNORM))
+ revents |= events & (POLLOUT | POLLWRNORM);
+
+ splx(s);
+ return (revents);
+}
diff --git a/sys/net/if_tun.h b/sys/net/if_tun.h
new file mode 100644
index 0000000..999372b
--- /dev/null
+++ b/sys/net/if_tun.h
@@ -0,0 +1,45 @@
+/* $NetBSD: if_tun.h,v 1.5 1994/06/29 06:36:27 cgd Exp $ */
+
+/*
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ *
+ * This source may be freely distributed, however I would be interested
+ * in any changes that are made.
+ *
+ * This driver takes packets off the IP i/f and hands them up to a
+ * user process to have its wicked way with. This driver has its
+ * roots in a similar driver written by Phil Cockcroft (formerly) at
+ * UCL. This driver is based much more on read/write/select mode of
+ * operation though.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_TUN_H_
+#define _NET_IF_TUN_H_
+
+/* Refer to if_tunvar.h for the softc stuff */
+
+/* Maximum transmit packet size (default) */
+#define TUNMTU 1500
+
+/* Maximum receive packet size (hard limit) */
+#define TUNMRU 16384
+
+struct tuninfo {
+ int baudrate; /* linespeed */
+ short mtu; /* maximum transmission unit */
+ u_char type; /* ethernet, tokenring, etc. */
+ u_char dummy; /* place holder */
+};
+
+/* ioctl's for get/set debug level, interface info and modes */
+#define TUNSDEBUG _IOW('t', 90, int)
+#define TUNGDEBUG _IOR('t', 89, int)
+#define TUNSIFINFO _IOW('t', 91, struct tuninfo)
+#define TUNGIFINFO _IOR('t', 92, struct tuninfo)
+#define TUNSLMODE _IOW('t', 93, int)
+#define TUNSIFMODE _IOW('t', 94, int)
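+
+/*
+ * Usage sketch (illustrative only; error handling omitted, and the
+ * device node name is an assumption -- typically /dev/tun0).  The
+ * device may only be opened by the super-user; each read(2) then
+ * returns one queued IP packet and each write(2) injects one:
+ *
+ *	int fd = open("/dev/tun0", O_RDWR);
+ *	struct tuninfo ti;
+ *
+ *	ioctl(fd, TUNGIFINFO, &ti);	read current mtu/type/baudrate
+ *	ti.mtu = 1400;
+ *	ioctl(fd, TUNSIFINFO, &ti);	push the new settings back
+ *	n = read(fd, buf, TUNMRU);	one packet per read
+ */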
+
+#endif /* !_NET_IF_TUN_H_ */
diff --git a/sys/net/if_tunvar.h b/sys/net/if_tunvar.h
new file mode 100644
index 0000000..2e9703f
--- /dev/null
+++ b/sys/net/if_tunvar.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 1998 Brian Somers <brian@Awfulhak.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_TUNVAR_H_
+#define _NET_IF_TUNVAR_H_
+
+struct tun_softc {
+ u_short tun_flags; /* misc flags */
+#define TUN_OPEN 0x0001
+#define TUN_INITED 0x0002
+#define TUN_RCOLL 0x0004
+#define TUN_IASET 0x0008
+#define TUN_DSTADDR 0x0010
+#define TUN_LMODE 0x0020
+#define TUN_RWAIT 0x0040
+#define TUN_ASYNC 0x0080
+
+#define TUN_READY (TUN_OPEN | TUN_INITED)
+
+	pid_t	tun_pid;		/* PID of the process that has it open */
+ struct ifnet tun_if; /* the interface */
+ struct sigio *tun_sigio; /* information for async I/O */
+ struct selinfo tun_rsel; /* read select */
+ struct selinfo tun_wsel; /* write select (not used) */
+};
+
+#endif /* !_NET_IF_TUNVAR_H_ */
diff --git a/sys/net/if_types.h b/sys/net/if_types.h
new file mode 100644
index 0000000..318b356
--- /dev/null
+++ b/sys/net/if_types.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 1989, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_types.h 8.2 (Berkeley) 4/20/94
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_TYPES_H_
+#define _NET_IF_TYPES_H_
+
+/*
+ * Interface types for the benefit of parsing media address headers.
+ * This list is derived from the SNMP list of ifTypes, currently
+ * documented in RFC1573.
+ */
+
+#define IFT_OTHER 0x1 /* none of the following */
+#define IFT_1822 0x2 /* old-style arpanet imp */
+#define IFT_HDH1822 0x3 /* HDH arpanet imp */
+#define IFT_X25DDN 0x4 /* x25 to imp */
+#define IFT_X25 0x5 /* PDN X25 interface (RFC877) */
+#define IFT_ETHER 0x6 /* Ethernet CSMACD */
+#define IFT_ISO88023 0x7 /* CMSA CD */
+#define IFT_ISO88024 0x8 /* Token Bus */
+#define IFT_ISO88025 0x9 /* Token Ring */
+#define IFT_ISO88026 0xa /* MAN */
+#define IFT_STARLAN 0xb
+#define IFT_P10 0xc /* Proteon 10MBit ring */
+#define IFT_P80 0xd /* Proteon 80MBit ring */
+#define IFT_HY 0xe /* Hyperchannel */
+#define IFT_FDDI 0xf
+#define IFT_LAPB 0x10
+#define IFT_SDLC 0x11
+#define IFT_T1 0x12
+#define IFT_CEPT 0x13 /* E1 - european T1 */
+#define IFT_ISDNBASIC 0x14
+#define IFT_ISDNPRIMARY 0x15
+#define IFT_PTPSERIAL 0x16 /* Proprietary PTP serial */
+#define IFT_PPP 0x17 /* RFC 1331 */
+#define IFT_LOOP 0x18 /* loopback */
+#define IFT_EON 0x19 /* ISO over IP */
+#define IFT_XETHER 0x1a /* obsolete 3MB experimental ethernet */
+#define IFT_NSIP 0x1b /* XNS over IP */
+#define IFT_SLIP 0x1c /* IP over generic TTY */
+#define IFT_ULTRA 0x1d /* Ultra Technologies */
+#define IFT_DS3 0x1e /* Generic T3 */
+#define IFT_SIP 0x1f /* SMDS */
+#define IFT_FRELAY 0x20 /* Frame Relay DTE only */
+#define IFT_RS232 0x21
+#define IFT_PARA 0x22 /* parallel-port */
+#define IFT_ARCNET 0x23
+#define IFT_ARCNETPLUS 0x24
+#define IFT_ATM 0x25 /* ATM cells */
+#define IFT_MIOX25 0x26
+#define IFT_SONET 0x27 /* SONET or SDH */
+#define IFT_X25PLE 0x28
+#define IFT_ISO88022LLC 0x29
+#define IFT_LOCALTALK 0x2a
+#define IFT_SMDSDXI 0x2b
+#define IFT_FRELAYDCE 0x2c /* Frame Relay DCE */
+#define IFT_V35 0x2d
+#define IFT_HSSI 0x2e
+#define IFT_HIPPI 0x2f
+#define IFT_MODEM 0x30 /* Generic Modem */
+#define IFT_AAL5 0x31 /* AAL5 over ATM */
+#define IFT_SONETPATH 0x32
+#define IFT_SONETVT 0x33
+#define IFT_SMDSICIP 0x34 /* SMDS InterCarrier Interface */
+#define IFT_PROPVIRTUAL 0x35 /* Proprietary Virtual/internal */
+#define IFT_PROPMUX 0x36 /* Proprietary Multiplexing */
+#define IFT_GIF 0x37
+#define IFT_FAITH 0x38
+
+#endif
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
new file mode 100644
index 0000000..8d497d4
--- /dev/null
+++ b/sys/net/if_var.h
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)if.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_VAR_H_
+#define _NET_IF_VAR_H_
+
+/*
+ * Structures defining a network interface, providing a packet
+ * transport mechanism (ala level 0 of the PUP protocols).
+ *
+ * Each interface accepts output datagrams of a specified maximum
+ * length, and provides higher level routines with input datagrams
+ * received from its medium.
+ *
+ * Output occurs when the routine if_output is called, with four parameters:
+ * (*ifp->if_output)(ifp, m, dst, rt)
+ * Here m is the mbuf chain to be sent and dst is the destination address.
+ * The output routine encapsulates the supplied datagram if necessary,
+ * and then transmits it on its medium.
+ *
+ * On input, each interface unwraps the data received by it, and either
+ * places it on the input queue of an internetwork datagram routine
+ * and posts the associated software interrupt, or passes the datagram to a raw
+ * packet input routine.
+ *
+ * Routines exist for locating interfaces by their addresses
+ * or for locating an interface on a certain network, as well as more general
+ * routing and gateway routines maintaining information used to locate
+ * interfaces. These routines live in the files if.c and route.c
+ */
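+
+/*
+ * For example (sketch), a network-layer protocol typically hands a
+ * packet to the driver as follows; rt may be a nil pointer when no
+ * route entry is at hand:
+ *
+ *	error = (*ifp->if_output)(ifp, m, (struct sockaddr *)dst, rt);
+ */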
+
+#ifdef __STDC__
+/*
+ * Forward structure declarations for function prototypes [sic].
+ */
+struct mbuf;
+struct proc;
+struct rtentry;
+struct socket;
+struct ether_header;
+#endif
+
+#include <sys/queue.h> /* get TAILQ macros */
+
+TAILQ_HEAD(ifnethead, ifnet); /* we use TAILQs so that the order of */
+TAILQ_HEAD(ifaddrhead, ifaddr); /* instantiation is preserved in the list */
+TAILQ_HEAD(ifprefixhead, ifprefix);
+LIST_HEAD(ifmultihead, ifmultiaddr);
+
+/*
+ * Structure defining a queue for a network interface.
+ */
+struct ifqueue {
+ struct mbuf *ifq_head;
+ struct mbuf *ifq_tail;
+ int ifq_len;
+ int ifq_maxlen;
+ int ifq_drops;
+};
+
+/*
+ * Structure defining a network interface.
+ *
+ * (Would like to call this struct ``if'', but C isn't PL/1.)
+ */
+struct ifnet {
+ void *if_softc; /* pointer to driver state */
+ char *if_name; /* name, e.g. ``en'' or ``lo'' */
+ TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */
+ struct ifaddrhead if_addrhead; /* linked list of addresses per if */
+ int if_pcount; /* number of promiscuous listeners */
+ struct bpf_if *if_bpf; /* packet filter structure */
+ u_short if_index; /* numeric abbreviation for this if */
+ short if_unit; /* sub-unit for lower level driver */
+ short if_timer; /* time 'til if_watchdog called */
+ short if_flags; /* up/down, broadcast, etc. */
+ int if_ipending; /* interrupts pending */
+ void *if_linkmib; /* link-type-specific MIB data */
+ size_t if_linkmiblen; /* length of above data */
+ struct if_data if_data;
+ struct ifmultihead if_multiaddrs; /* multicast addresses configured */
+ int if_amcount; /* number of all-multicast requests */
+/* procedure handles */
+ int (*if_output) /* output routine (enqueue) */
+ __P((struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct rtentry *));
+ void (*if_start) /* initiate output routine */
+ __P((struct ifnet *));
+ int (*if_done) /* output complete routine */
+ __P((struct ifnet *)); /* (XXX not used; fake prototype) */
+ int (*if_ioctl) /* ioctl routine */
+ __P((struct ifnet *, u_long, caddr_t));
+ void (*if_watchdog) /* timer routine */
+ __P((struct ifnet *));
+ int (*if_poll_recv) /* polled receive routine */
+ __P((struct ifnet *, int *));
+ int (*if_poll_xmit) /* polled transmit routine */
+ __P((struct ifnet *, int *));
+ void (*if_poll_intren) /* polled interrupt reenable routine */
+ __P((struct ifnet *));
+ void (*if_poll_slowinput) /* input routine for slow devices */
+ __P((struct ifnet *, struct mbuf *));
+ void (*if_init) /* Init routine */
+ __P((void *));
+ int (*if_resolvemulti) /* validate/resolve multicast */
+ __P((struct ifnet *, struct sockaddr **, struct sockaddr *));
+ struct ifqueue if_snd; /* output queue */
+ struct ifqueue *if_poll_slowq; /* input queue for slow devices */
+ struct ifprefixhead if_prefixhead; /* list of prefixes per if */
+};
+typedef void if_init_f_t __P((void *));
+
+#define if_mtu if_data.ifi_mtu
+#define if_type if_data.ifi_type
+#define if_physical if_data.ifi_physical
+#define if_addrlen if_data.ifi_addrlen
+#define if_hdrlen if_data.ifi_hdrlen
+#define if_metric if_data.ifi_metric
+#define if_baudrate if_data.ifi_baudrate
+#define if_ipackets if_data.ifi_ipackets
+#define if_ierrors if_data.ifi_ierrors
+#define if_opackets if_data.ifi_opackets
+#define if_oerrors if_data.ifi_oerrors
+#define if_collisions if_data.ifi_collisions
+#define if_ibytes if_data.ifi_ibytes
+#define if_obytes if_data.ifi_obytes
+#define if_imcasts if_data.ifi_imcasts
+#define if_omcasts if_data.ifi_omcasts
+#define if_iqdrops if_data.ifi_iqdrops
+#define if_noproto if_data.ifi_noproto
+#define if_lastchange if_data.ifi_lastchange
+#define if_recvquota if_data.ifi_recvquota
+#define if_xmitquota if_data.ifi_xmitquota
+#define if_rawoutput(if, m, sa) if_output(if, m, sa, (struct rtentry *)0)
+
+/* for compatibility with other BSDs */
+#define if_addrlist if_addrhead
+#define if_list if_link
+
+/*
+ * Bit values in if_ipending
+ */
+#define IFI_RECV 1 /* I want to receive */
+#define IFI_XMIT 2 /* I want to transmit */
+
+/*
+ * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_poll_slowq)
+ * are queues of messages stored on ifqueue structures
+ * (defined above). Entries are added to and deleted from these structures
+ * by these macros, which should be called with ipl raised to splimp().
+ */
+#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen)
+#define IF_DROP(ifq) ((ifq)->ifq_drops++)
+#define IF_ENQUEUE(ifq, m) { \
+ (m)->m_nextpkt = 0; \
+ if ((ifq)->ifq_tail == 0) \
+ (ifq)->ifq_head = m; \
+ else \
+ (ifq)->ifq_tail->m_nextpkt = m; \
+ (ifq)->ifq_tail = m; \
+ (ifq)->ifq_len++; \
+}
+#define IF_PREPEND(ifq, m) { \
+ (m)->m_nextpkt = (ifq)->ifq_head; \
+ if ((ifq)->ifq_tail == 0) \
+ (ifq)->ifq_tail = (m); \
+ (ifq)->ifq_head = (m); \
+ (ifq)->ifq_len++; \
+}
+#define IF_DEQUEUE(ifq, m) { \
+ (m) = (ifq)->ifq_head; \
+ if (m) { \
+ if (((ifq)->ifq_head = (m)->m_nextpkt) == 0) \
+ (ifq)->ifq_tail = 0; \
+ (m)->m_nextpkt = 0; \
+ (ifq)->ifq_len--; \
+ } \
+}
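+
+/*
+ * Typical enqueue pattern in a driver output routine (sketch; this
+ * mirrors the use in if_tun.c and if_vlan.c):
+ *
+ *	s = splimp();
+ *	if (IF_QFULL(&ifp->if_snd)) {
+ *		IF_DROP(&ifp->if_snd);
+ *		splx(s);
+ *		m_freem(m);
+ *		return (ENOBUFS);
+ *	}
+ *	IF_ENQUEUE(&ifp->if_snd, m);
+ *	splx(s);
+ */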
+
+#ifdef KERNEL
+#define IF_ENQ_DROP(ifq, m) if_enq_drop(ifq, m)
+
+#if defined(__GNUC__) && defined(MT_HEADER)
+static __inline int
+if_queue_drop(struct ifqueue *ifq, struct mbuf *m)
+{
+ IF_DROP(ifq);
+ return 0;
+}
+
+static __inline int
+if_enq_drop(struct ifqueue *ifq, struct mbuf *m)
+{
+ if (IF_QFULL(ifq) &&
+ !if_queue_drop(ifq, m))
+ return 0;
+ IF_ENQUEUE(ifq, m);
+ return 1;
+}
+#else
+
+#ifdef MT_HEADER
+int if_enq_drop __P((struct ifqueue *, struct mbuf *));
+#endif
+
+#endif
+
+/*
+ * 72 was chosen below because it is the size of a TCP/IP
+ * header (40) + the minimum mss (32).
+ */
+#define IF_MINMTU 72
+#define IF_MAXMTU 65535
+
+#endif /* KERNEL */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address families,
+ * are allocated and attached when an address is set, and are linked
+ * together so all addresses for an interface can be located.
+ */
+struct ifaddr {
+ struct sockaddr *ifa_addr; /* address of interface */
+ struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */
+#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
+ struct sockaddr *ifa_netmask; /* used to determine subnet */
+ struct ifnet *ifa_ifp; /* back-pointer to interface */
+ TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */
+ void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */
+ __P((int, struct rtentry *, struct sockaddr *));
+ u_short ifa_flags; /* mostly rt_flags for cloning */
+ u_int ifa_refcnt; /* references to this structure */
+ int ifa_metric; /* cost of going out this interface */
+#ifdef notdef
+ struct rtentry *ifa_rt; /* XXXX for ROUTETOIF ????? */
+#endif
+ int (*ifa_claim_addr) /* check if an addr goes to this if */
+ __P((struct ifaddr *, struct sockaddr *));
+
+};
+#define IFA_ROUTE RTF_UP /* route installed */
+
+/* for compatibility with other BSDs */
+#define ifa_list ifa_link
+
+/*
+ * The prefix structure contains information about one prefix
+ * of an interface. They are maintained by the different address families,
+ * are allocated and attached when a prefix or an address is set,
+ * and are linked together so all prefixes for an interface can be located.
+ */
+struct ifprefix {
+ struct sockaddr *ifpr_prefix; /* prefix of interface */
+ struct ifnet *ifpr_ifp; /* back-pointer to interface */
+ TAILQ_ENTRY(ifprefix) ifpr_list; /* queue macro glue */
+ u_char ifpr_plen; /* prefix length in bits */
+ u_char ifpr_type; /* protocol dependent prefix type */
+};
+
+/*
+ * Multicast address structure. This is analogous to the ifaddr
+ * structure except that it keeps track of multicast addresses.
+ * Also, the reference count here is a count of requests for this
+ * address, not a count of pointers to this structure.
+ */
+struct ifmultiaddr {
+ LIST_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */
+ struct sockaddr *ifma_addr; /* address this membership is for */
+ struct sockaddr *ifma_lladdr; /* link-layer translation, if any */
+ struct ifnet *ifma_ifp; /* back-pointer to interface */
+ u_int ifma_refcount; /* reference count */
+ void *ifma_protospec; /* protocol-specific state, if any */
+};
+
+#ifdef KERNEL
+#define IFAFREE(ifa) \
+ do { \
+ if ((ifa)->ifa_refcnt <= 0) \
+ ifafree(ifa); \
+ else \
+ (ifa)->ifa_refcnt--; \
+ } while (0)
+
+extern struct ifnethead ifnet;
+extern struct ifnet **ifindex2ifnet;
+extern int ifqmaxlen;
+extern struct ifnet loif[];
+extern int if_index;
+extern struct ifaddr **ifnet_addrs;
+
+void ether_ifattach __P((struct ifnet *));
+void ether_input __P((struct ifnet *, struct ether_header *, struct mbuf *));
+int ether_output __P((struct ifnet *,
+ struct mbuf *, struct sockaddr *, struct rtentry *));
+int ether_ioctl __P((struct ifnet *, int, caddr_t));
+
+int if_addmulti __P((struct ifnet *, struct sockaddr *,
+ struct ifmultiaddr **));
+int if_allmulti __P((struct ifnet *, int));
+void if_attach __P((struct ifnet *));
+int if_delmulti __P((struct ifnet *, struct sockaddr *));
+void if_detach __P((struct ifnet *));
+void if_down __P((struct ifnet *));
+void if_route __P((struct ifnet *, int flag, int fam));
+void if_unroute __P((struct ifnet *, int flag, int fam));
+void if_up __P((struct ifnet *));
+/*void ifinit __P((void));*/ /* declared in systm.h for main() */
+int ifioctl __P((struct socket *, u_long, caddr_t, struct proc *));
+int ifpromisc __P((struct ifnet *, int));
+struct ifnet *ifunit __P((char *));
+struct ifnet *if_withname __P((struct sockaddr *));
+
+int if_poll_recv_slow __P((struct ifnet *ifp, int *quotap));
+void if_poll_xmit_slow __P((struct ifnet *ifp, int *quotap));
+void if_poll_throttle __P((void));
+void if_poll_unthrottle __P((void *));
+void if_poll_init __P((void));
+void if_poll __P((void));
+
+struct ifaddr *ifa_ifwithaddr __P((struct sockaddr *));
+struct ifaddr *ifa_ifwithdstaddr __P((struct sockaddr *));
+struct ifaddr *ifa_ifwithnet __P((struct sockaddr *));
+struct ifaddr *ifa_ifwithroute __P((int, struct sockaddr *,
+ struct sockaddr *));
+struct ifaddr *ifaof_ifpforaddr __P((struct sockaddr *, struct ifnet *));
+void ifafree __P((struct ifaddr *));
+
+struct ifmultiaddr *ifmaof_ifpforaddr __P((struct sockaddr *,
+ struct ifnet *));
+int if_simloop __P((struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, int hlen));
+
+#endif /* KERNEL */
+
+
+#endif /* !_NET_IF_VAR_H_ */
diff --git a/sys/net/if_vlan.c b/sys/net/if_vlan.c
new file mode 100644
index 0000000..218bff7
--- /dev/null
+++ b/sys/net/if_vlan.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
+ * Might be extended some day to also handle IEEE 802.1p priority
+ * tagging. This is sort of sneaky in the implementation, since
+ * we need to pretend to be enough of an Ethernet implementation
+ * to make arp work. The way we do this is by telling everyone
+ * that we are an Ethernet, and then catching the packets that
+ * ether_output() left on our output queue when it calls
+ * if_start(), rewriting them for use by the real outgoing interface,
+ * and asking it to send them.
+ *
+ *
+ * XXX It's incorrect to assume that we must always kludge up
+ * headers on the physical device's behalf: some devices support
+ * VLAN tag insertion and extraction in firmware. For these cases,
+ * one can change the behavior of the vlan interface by setting
+ * the LINK0 flag on it (that is, setting the vlan interface's LINK0
+ * flag, _not_ the parent's LINK0 flag; we try to leave the parent
+ * alone). If the interface has the LINK0 flag set, then it will
+ * not modify the ethernet header on output because the parent
+ * can do that for itself. On input, the parent can call vlan_input_tag()
+ * directly in order to supply us with an incoming mbuf and the vlan
+ * tag value that goes with it.
+ */
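+
+/*
+ * For reference, the encapsulated frame built below looks roughly
+ * like this on the wire (cf. struct ether_vlan_header):
+ *
+ *	dst(6) src(6) ETHERTYPE_VLAN(2) prio/vlan-tag(2) orig-type(2) data
+ */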
+
+#include "vlan.h"
+#if NVLAN > 0
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#endif
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_8021_VLAN, vlan, CTLFLAG_RW, 0, "IEEE 802.1Q VLAN");
+SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0, "for consistency");
+
+u_int vlan_proto = ETHERTYPE_VLAN;
+SYSCTL_INT(_net_link_vlan_link, VLANCTL_PROTO, proto, CTLFLAG_RW, &vlan_proto,
+ 0, "Ethernet protocol used for VLAN encapsulation");
+
+static struct ifvlan ifv_softc[NVLAN];
+
+static void vlan_start(struct ifnet *ifp);
+static void vlan_ifinit(void *foo);
+static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
+static int vlan_setmulti(struct ifnet *ifp);
+static int vlan_unconfig(struct ifnet *ifp);
+static int vlan_config(struct ifvlan *ifv, struct ifnet *p);
+
+/*
+ * Program our multicast filter. What we're actually doing is
+ * programming the multicast filter of the parent. This has the
+ * side effect of causing the parent interface to receive multicast
+ * traffic that it doesn't really want, which ends up being discarded
+ * later by the upper protocol layers. Unfortunately, there's no way
+ * to avoid this: there really is only one physical interface.
+ */
+static int vlan_setmulti(struct ifnet *ifp)
+{
+ struct ifnet *ifp_p;
+ struct ifmultiaddr *ifma, *rifma = NULL;
+ struct ifvlan *sc;
+ struct vlan_mc_entry *mc = NULL;
+ struct sockaddr_dl sdl;
+ int error;
+
+ /* Find the parent. */
+ sc = ifp->if_softc;
+ ifp_p = sc->ifv_p;
+
+ sdl.sdl_len = ETHER_ADDR_LEN;
+ sdl.sdl_family = AF_LINK;
+
+ /* First, remove any existing filter entries. */
+ while(sc->vlan_mc_listhead.slh_first != NULL) {
+ mc = sc->vlan_mc_listhead.slh_first;
+ bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
+ error = if_delmulti(ifp_p, (struct sockaddr *)&sdl);
+ if (error)
+ return(error);
+ SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
+ free(mc, M_DEVBUF);
+ }
+
+ /* Now program new ones. */
+ for (ifma = ifp->if_multiaddrs.lh_first;
+ ifma != NULL;ifma = ifma->ifma_link.le_next) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ (char *)&mc->mc_addr, ETHER_ADDR_LEN);
+ SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
+ error = if_addmulti(ifp_p, (struct sockaddr *)&sdl, &rifma);
+ if (error)
+ return(error);
+ }
+
+ return(0);
+}
+
+static void
+vlaninit(void *dummy)
+{
+ int i;
+
+ for (i = 0; i < NVLAN; i++) {
+ struct ifnet *ifp = &ifv_softc[i].ifv_if;
+
+ ifp->if_softc = &ifv_softc[i];
+ ifp->if_name = "vlan";
+ ifp->if_unit = i;
+ /* NB: flags are not set here */
+ ifp->if_linkmib = &ifv_softc[i].ifv_mib;
+ ifp->if_linkmiblen = sizeof ifv_softc[i].ifv_mib;
+ /* NB: mtu is not set here */
+
+ ifp->if_init = vlan_ifinit;
+ ifp->if_start = vlan_start;
+ ifp->if_ioctl = vlan_ioctl;
+ ifp->if_output = ether_output;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ if_attach(ifp);
+ ether_ifattach(ifp);
+ bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
+ /* Now undo some of the damage... */
+ ifp->if_data.ifi_type = IFT_8021_VLAN;
+ ifp->if_data.ifi_hdrlen = EVL_ENCAPLEN;
+ ifp->if_resolvemulti = 0;
+ }
+}
+PSEUDO_SET(vlaninit, if_vlan);
+
+static void
+vlan_ifinit(void *foo)
+{
+ return;
+}
+
+static void
+vlan_start(struct ifnet *ifp)
+{
+ struct ifvlan *ifv;
+ struct ifnet *p;
+ struct ether_vlan_header *evl;
+ struct mbuf *m;
+
+ ifv = ifp->if_softc;
+ p = ifv->ifv_p;
+
+ ifp->if_flags |= IFF_OACTIVE;
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == 0)
+ break;
+ if (ifp->if_bpf)
+ bpf_mtap(ifp, m);
+
+ /*
+ * If the LINK0 flag is set, it means the underlying interface
+ * can do VLAN tag insertion itself and doesn't require us to
+ * create a special header for it. In this case, we just pass
+ * the packet along. However, we need some way to tell the
+ * interface where the packet came from so that it knows how
+ * to find the VLAN tag to use, so we set the rcvif in the
+ * mbuf header to our ifnet.
+ *
+ * Note: we also set the M_PROTO1 flag in the mbuf to let
+ * the parent driver know that the rcvif pointer is really
+ * valid. We need to do this because sometimes mbufs will
+ * be allocated by other parts of the system that contain
+ * garbage in the rcvif pointer. Using the M_PROTO1 flag
+ * lets the driver perform a proper sanity check and avoid
+ * following potentially bogus rcvif pointers off into
+ * never-never land.
+ */
+ if (ifp->if_flags & IFF_LINK0) {
+ m->m_pkthdr.rcvif = ifp;
+ m->m_flags |= M_PROTO1;
+ } else {
+ M_PREPEND(m, EVL_ENCAPLEN, M_DONTWAIT);
+ if (m == 0)
+ continue;
+ /* M_PREPEND takes care of m_len, m_pkthdr.len for us */
+
+ /*
+ * Transform the Ethernet header into an Ethernet header
+ * with 802.1Q encapsulation.
+ */
+ bcopy(mtod(m, char *) + EVL_ENCAPLEN, mtod(m, char *),
+ sizeof(struct ether_header));
+ evl = mtod(m, struct ether_vlan_header *);
+ evl->evl_proto = evl->evl_encap_proto;
+ evl->evl_encap_proto = htons(vlan_proto);
+ evl->evl_tag = htons(ifv->ifv_tag);
+#ifdef DEBUG
+ printf("vlan_start: %*D\n", sizeof *evl,
+ (char *)evl, ":");
+#endif
+ }
+
+ /*
+ * Send it, precisely as ether_output() would have.
+ * We are already running at splimp.
+ */
+ if (IF_QFULL(&p->if_snd)) {
+ IF_DROP(&p->if_snd);
+ /* XXX stats */
+ ifp->if_oerrors++;
+ m_freem(m);
+ continue;
+ }
+ IF_ENQUEUE(&p->if_snd, m);
+ if ((p->if_flags & IFF_OACTIVE) == 0) {
+ p->if_start(p);
+ ifp->if_opackets++;
+ }
+ }
+ ifp->if_flags &= ~IFF_OACTIVE;
+
+ return;
+}
+
+int
+vlan_input_tag(struct ether_header *eh, struct mbuf *m, u_int16_t t)
+{
+ int i;
+ struct ifvlan *ifv;
+
+ for (i = 0; i < NVLAN; i++) {
+ ifv = &ifv_softc[i];
+ if (ifv->ifv_tag == t)
+ break;
+ }
+
+ if (i >= NVLAN || (ifv->ifv_if.if_flags & IFF_UP) == 0) {
+ m_free(m);
+ return -1; /* So the parent can take note */
+ }
+
+ /*
+ * Having found a valid vlan interface corresponding to
+	 * the given source interface and vlan tag, run the
+	 * real packet through ether_input().
+ */
+ m->m_pkthdr.rcvif = &ifv->ifv_if;
+
+ if (ifv->ifv_if.if_bpf) {
+ /*
+ * Do the usual BPF fakery. Note that we don't support
+ * promiscuous mode here, since it would require the
+ * drivers to know about VLANs and we're not ready for
+ * that yet.
+ */
+ struct mbuf m0;
+ m0.m_next = m;
+ m0.m_len = sizeof(struct ether_header);
+ m0.m_data = (char *)eh;
+ bpf_mtap(&ifv->ifv_if, &m0);
+ }
+ ifv->ifv_if.if_ipackets++;
+ ether_input(&ifv->ifv_if, eh, m);
+ return 0;
+}
+
+int
+vlan_input(struct ether_header *eh, struct mbuf *m)
+{
+ int i;
+ struct ifvlan *ifv;
+
+ for (i = 0; i < NVLAN; i++) {
+ ifv = &ifv_softc[i];
+ if (m->m_pkthdr.rcvif == ifv->ifv_p
+ && (EVL_VLANOFTAG(ntohs(*mtod(m, u_int16_t *)))
+ == ifv->ifv_tag))
+ break;
+ }
+
+ if (i >= NVLAN || (ifv->ifv_if.if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return -1; /* so ether_input can take note */
+ }
+
+ /*
+ * Having found a valid vlan interface corresponding to
+ * the given source interface and vlan tag, remove the
+ * encapsulation, and run the real packet through
+ * ether_input() a second time (it had better be
+ * reentrant!).
+ */
+ m->m_pkthdr.rcvif = &ifv->ifv_if;
+ eh->ether_type = mtod(m, u_int16_t *)[1];
+ m->m_data += EVL_ENCAPLEN;
+ m->m_len -= EVL_ENCAPLEN;
+ m->m_pkthdr.len -= EVL_ENCAPLEN;
+
+ if (ifv->ifv_if.if_bpf) {
+ /*
+ * Do the usual BPF fakery. Note that we don't support
+ * promiscuous mode here, since it would require the
+ * drivers to know about VLANs and we're not ready for
+ * that yet.
+ */
+ struct mbuf m0;
+ m0.m_next = m;
+ m0.m_len = sizeof(struct ether_header);
+ m0.m_data = (char *)eh;
+ bpf_mtap(&ifv->ifv_if, &m0);
+ }
+ ifv->ifv_if.if_ipackets++;
+ ether_input(&ifv->ifv_if, eh, m);
+ return 0;
+}
+
+static int
+vlan_config(struct ifvlan *ifv, struct ifnet *p)
+{
+ struct ifaddr *ifa1, *ifa2;
+ struct sockaddr_dl *sdl1, *sdl2;
+
+ if (p->if_data.ifi_type != IFT_ETHER)
+ return EPROTONOSUPPORT;
+ if (ifv->ifv_p)
+ return EBUSY;
+ ifv->ifv_p = p;
+ if (p->if_data.ifi_hdrlen == sizeof(struct ether_vlan_header))
+ ifv->ifv_if.if_mtu = p->if_mtu;
+ else
+ ifv->ifv_if.if_mtu = p->if_data.ifi_mtu - EVL_ENCAPLEN;
+
+ /*
+ * Preserve the state of the LINK0 flag for ourselves.
+ */
+ ifv->ifv_if.if_flags = (p->if_flags & ~(IFF_LINK0));
+
+ /*
+ * Set up our ``Ethernet address'' to reflect the underlying
+ * physical interface's.
+ */
+ ifa1 = ifnet_addrs[ifv->ifv_if.if_index - 1];
+ ifa2 = ifnet_addrs[p->if_index - 1];
+ sdl1 = (struct sockaddr_dl *)ifa1->ifa_addr;
+ sdl2 = (struct sockaddr_dl *)ifa2->ifa_addr;
+ sdl1->sdl_type = IFT_ETHER;
+ sdl1->sdl_alen = ETHER_ADDR_LEN;
+ bcopy(LLADDR(sdl2), LLADDR(sdl1), ETHER_ADDR_LEN);
+ bcopy(LLADDR(sdl2), ifv->ifv_ac.ac_enaddr, ETHER_ADDR_LEN);
+ return 0;
+}
+
+static int
+vlan_unconfig(struct ifnet *ifp)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+ struct vlan_mc_entry *mc;
+ struct ifvlan *ifv;
+ struct ifnet *p;
+ int error;
+
+ ifv = ifp->if_softc;
+ p = ifv->ifv_p;
+
+ /*
+ * Since the interface is being unconfigured, we need to
+ * empty the list of multicast groups that we may have joined
+ * while we were alive and remove them from the parent's list
+ * as well.
+ */
+ while(ifv->vlan_mc_listhead.slh_first != NULL) {
+ struct sockaddr_dl sdl;
+
+ sdl.sdl_len = ETHER_ADDR_LEN;
+ sdl.sdl_family = AF_LINK;
+ mc = ifv->vlan_mc_listhead.slh_first;
+ bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
+ error = if_delmulti(p, (struct sockaddr *)&sdl);
+ error = if_delmulti(ifp, (struct sockaddr *)&sdl);
+ if (error)
+ return(error);
+ SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
+ free(mc, M_DEVBUF);
+ }
+
+ /* Disconnect from parent. */
+ ifv->ifv_p = NULL;
+ ifv->ifv_if.if_mtu = ETHERMTU;
+
+ /* Clear our MAC address. */
+ ifa = ifnet_addrs[ifv->ifv_if.if_index - 1];
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ETHER;
+ sdl->sdl_alen = ETHER_ADDR_LEN;
+ bzero(LLADDR(sdl), ETHER_ADDR_LEN);
+ bzero(ifv->ifv_ac.ac_enaddr, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+static int
+vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifaddr *ifa;
+ struct ifnet *p;
+ struct ifreq *ifr;
+ struct ifvlan *ifv;
+ struct vlanreq vlr;
+ int error = 0;
+
+ ifr = (struct ifreq *)data;
+ ifa = (struct ifaddr *)data;
+ ifv = ifp->if_softc;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ arp_ifinit(&ifv->ifv_ac, ifa);
+ break;
+#endif
+ default:
+ break;
+ }
+ break;
+
+ case SIOCGIFADDR:
+ {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) &ifr->ifr_data;
+ bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr,
+ (caddr_t) sa->sa_data, ETHER_ADDR_LEN);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ * This is bogus. The underlying interface might support
+ * jumbo frames.
+ */
+ if (ifr->ifr_mtu > ETHERMTU) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+
+ case SIOCSETVLAN:
+ error = copyin(ifr->ifr_data, &vlr, sizeof vlr);
+ if (error)
+ break;
+ if (vlr.vlr_parent[0] == '\0') {
+ vlan_unconfig(ifp);
+ if_down(ifp);
+ ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);
+ break;
+ }
+ p = ifunit(vlr.vlr_parent);
+ if (p == 0) {
+ error = ENOENT;
+ break;
+ }
+ error = vlan_config(ifv, p);
+ if (error)
+ break;
+ ifv->ifv_tag = vlr.vlr_tag;
+ ifp->if_flags |= IFF_RUNNING;
+ break;
+
+ case SIOCGETVLAN:
+ bzero(&vlr, sizeof vlr);
+ if (ifv->ifv_p) {
+ snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent),
+ "%s%d", ifv->ifv_p->if_name, ifv->ifv_p->if_unit);
+ vlr.vlr_tag = ifv->ifv_tag;
+ }
+ error = copyout(&vlr, ifr->ifr_data, sizeof vlr);
+ break;
+
+ case SIOCSIFFLAGS:
+ /*
+ * We don't support promiscuous mode
+ * right now because it would require help from the
+ * underlying drivers, which hasn't been implemented.
+ */
+ if (ifr->ifr_flags & (IFF_PROMISC)) {
+ ifp->if_flags &= ~(IFF_PROMISC);
+ error = EINVAL;
+ }
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ error = vlan_setmulti(ifp);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return error;
+}
+
+#endif /* NVLAN > 0 */
diff --git a/sys/net/if_vlan_var.h b/sys/net/if_vlan_var.h
new file mode 100644
index 0000000..1427b34
--- /dev/null
+++ b/sys/net/if_vlan_var.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_VLAN_VAR_H_
+#define _NET_IF_VLAN_VAR_H_ 1
+
+#ifdef KERNEL
+struct vlan_mc_entry {
+ struct ether_addr mc_addr;
+ SLIST_ENTRY(vlan_mc_entry) mc_entries;
+};
+
+struct ifvlan {
+ struct arpcom ifv_ac; /* make this an interface */
+	struct	ifnet *ifv_p;	/* parent interface of this vlan */
+ struct ifv_linkmib {
+ int ifvm_parent;
+ u_int16_t ifvm_proto; /* encapsulation ethertype */
+ u_int16_t ifvm_tag; /* tag to apply on packets leaving if */
+ } ifv_mib;
+ SLIST_HEAD(__vlan_mchead, vlan_mc_entry) vlan_mc_listhead;
+};
+#define ifv_if ifv_ac.ac_if
+#define ifv_tag ifv_mib.ifvm_tag
+#endif /* KERNEL */
+
+struct ether_vlan_header {
+ u_char evl_dhost[ETHER_ADDR_LEN];
+ u_char evl_shost[ETHER_ADDR_LEN];
+ u_int16_t evl_encap_proto;
+ u_int16_t evl_tag;
+ u_int16_t evl_proto;
+};
+
+#define EVL_VLANOFTAG(tag) ((tag) & 4095)
+#define EVL_PRIOFTAG(tag) (((tag) >> 13) & 7)
+#define EVL_ENCAPLEN 4 /* length in octets of encapsulation */
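+
+/*
+ * Sketch: given a tag control field in network byte order (as found
+ * in evl_tag), the macros above split it into its parts:
+ *
+ *	vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));	vlan id, 0..4095
+ *	pri = EVL_PRIOFTAG(ntohs(evl->evl_tag));	802.1p priority, 0..7
+ */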
+
+/* When these sorts of interfaces get their own identifier... */
+#define IFT_8021_VLAN IFT_PROPVIRTUAL
+
+/* sysctl(3) tags, for compatibility purposes */
+#define VLANCTL_PROTO 1
+#define VLANCTL_MAX 2
+
+/*
+ * Configuration structure for SIOCSETVLAN and SIOCGETVLAN ioctls.
+ */
+struct vlanreq {
+ char vlr_parent[IFNAMSIZ];
+ u_short vlr_tag;
+};
+#define SIOCSETVLAN SIOCSIFGENERIC
+#define SIOCGETVLAN SIOCGIFGENERIC
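+
+/*
+ * Configuration sketch (userland, illustrative; error checks omitted,
+ * and the parent name ``fxp0'' is only an example).  Binding vlan0
+ * with tag 5 to an Ethernet parent is a SIOCSETVLAN ioctl on an
+ * ordinary socket s:
+ *
+ *	struct ifreq ifr;
+ *	struct vlanreq vlr;
+ *
+ *	bzero(&vlr, sizeof vlr);
+ *	strncpy(vlr.vlr_parent, "fxp0", sizeof vlr.vlr_parent);
+ *	vlr.vlr_tag = 5;
+ *	strncpy(ifr.ifr_name, "vlan0", sizeof ifr.ifr_name);
+ *	ifr.ifr_data = (caddr_t)&vlr;
+ *	ioctl(s, SIOCSETVLAN, &ifr);
+ */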
+
+#ifdef KERNEL
+/* shared with if_ethersubr.c: */
+extern u_int vlan_proto;
+extern int vlan_input(struct ether_header *eh, struct mbuf *m);
+extern int vlan_input_tag(struct ether_header *eh,
+ struct mbuf *m, u_int16_t t);
+#endif
+
+#endif /* _NET_IF_VLAN_VAR_H_ */
diff --git a/sys/net/iso88025.h b/sys/net/iso88025.h
new file mode 100644
index 0000000..ccb16e1
--- /dev/null
+++ b/sys/net/iso88025.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 1998, Larry Lile
+ * All rights reserved.
+ *
+ * For latest sources and information on this driver, please
+ * go to http://anarchy.stdio.com.
+ *
+ * Questions, comments or suggestions should be directed to
+ * Larry Lile <lile@stdio.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ * Information gathered from tokenring@freebsd, /sys/net/ethernet.h and
+ * the Mach token ring driver.
+ */
+
+/*
+ * Fundamental constants relating to iso 802.5
+ */
+
+#ifndef _NET_ISO88025_H_
+#define _NET_ISO88025_H_
+
+/*
+ * The number of bytes in an iso 802.5 (MAC) address.
+ */
+#define ISO88025_ADDR_LEN 6
+
+/*
+ * Frame header field sizes.
+ */
+#define ISO88025_HDR_LEN (ISO88025_CF_LEN + ISO88025_ADDR_LEN*2)
+#define ISO88025_CF_LEN 2
+#define RCF_LEN 2
+#define RIF_LEN 16
+
+
+/*
+ * The minimum packet length.
+ */
+#define ISO88025_MIN_LEN 0 /* This offends my morality */
+
+/*
+ * The maximum packet length.
+ */
+#define ISO88025_MAX_LEN 17960
+
+/*
+ * A macro to validate a packet length.
+ */
+#define ISO88025_IS_VALID_LEN(foo) \
+ ((foo) >= ISO88025_MIN_LEN && (foo) <= ISO88025_MAX_LEN)
+
+/*
+ * ISO 802.5 physical header
+ */
+struct iso88025_header {
+ u_char ac; /* access control field */
+ u_char fc; /* frame control field */
+ u_char iso88025_dhost[ISO88025_ADDR_LEN]; /* destination address */
+ u_char iso88025_shost[ISO88025_ADDR_LEN]; /* source address */
+ u_short rcf; /* route control field */
+ u_short rseg[RIF_LEN]; /* routing registers */
+};
+
+struct iso88025_sockaddr_data {
+ u_char ether_dhost[ISO88025_ADDR_LEN];
+ u_char ether_shost[ISO88025_ADDR_LEN];
+ u_char ac;
+ u_char fc;
+};
+
+/*
+ * Structure of a 48-bit iso 802.5 address.
+ * (We could also add the 16-bit addresses as a union.)
+ */
+struct iso88025_addr {
+ u_char octet[ISO88025_ADDR_LEN];
+};
+
+#define ISO88025MTU 18000
+#define ISO88025_DEFAULT_MTU 1500
+#define senderr(e) { error = (e); goto bad;}
+
+void iso88025_ifattach __P((struct ifnet *));
+int iso88025_ioctl __P((struct ifnet *, int, caddr_t));
+int iso88025_output __P((struct ifnet *, struct mbuf *, struct sockaddr *, struct rtentry *));
+void iso88025_input __P((struct ifnet *, struct iso88025_header *, struct mbuf *));
+
+
+#endif
diff --git a/sys/net/net_osdep.c b/sys/net/net_osdep.c
new file mode 100644
index 0000000..81dd3a8
--- /dev/null
+++ b/sys/net/net_osdep.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/errno.h>
+#include <sys/time.h>
+#include <sys/syslog.h>
+#include <machine/cpu.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <net/bpf.h>
+#include <net/net_osdep.h>
+
+const char *
+if_name(ifp)
+ struct ifnet *ifp;
+{
+ static char nam[IFNAMSIZ + 10]; /*enough?*/
+
+ snprintf(nam, sizeof(nam), "%s%d", ifp->if_name, ifp->if_unit);
+ return nam;
+}
diff --git a/sys/net/net_osdep.h b/sys/net/net_osdep.h
new file mode 100644
index 0000000..11fc27c
--- /dev/null
+++ b/sys/net/net_osdep.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * glue for kernel code programming differences.
+ */
+
+/*
+ * OS dependencies:
+ *
+ * - privileged process
+ * NetBSD, FreeBSD 3
+ * struct proc *p;
+ * if (p && !suser(p->p_ucred, &p->p_acflag))
+ * privileged;
+ * OpenBSD, BSDI [34], FreeBSD 2
+ * struct socket *so;
+ * if (so->so_state & SS_PRIV)
+ * privileged;
+ * - foo_control
+ * NetBSD, FreeBSD 3
+ * needs to give struct proc * as argument
+ * OpenBSD, BSDI [34], FreeBSD 2
+ * do not need struct proc *
+ * - bpf:
+ * OpenBSD, NetBSD, BSDI [34]
+ * need caddr_t * (= if_bpf **) and struct ifnet *
+ * FreeBSD 2, FreeBSD 3
+ * need only struct ifnet * as argument
+ * - struct ifnet
+ * use queue.h? member names if name
+ * --- --- ---
+ * FreeBSD 2 no old standard if_name+unit
+ * FreeBSD 3 yes strange if_name+unit
+ * OpenBSD yes standard if_xname
+ * NetBSD yes standard if_xname
+ * BSDI [34] no old standard if_name+unit
+ * - usrreq
+ * NetBSD, OpenBSD, BSDI [34], FreeBSD 2
+ * single function with PRU_xx, arguments are mbuf
+ * FreeBSD 3
+ * separate functions, non-mbuf arguments
+ * - {set,get}sockopt
+ * NetBSD, OpenBSD, BSDI [34], FreeBSD 2
+ * manipulation based on mbuf
+ * FreeBSD 3
+ * non-mbuf manipulation using sooptcopy{in,out}()
+ * - timeout() and untimeout()
+ * NetBSD, OpenBSD, BSDI [34], FreeBSD 2
+ * timeout() is a void function
+ * FreeBSD 3
+ * timeout() is non-void, must keep returned value for untimeout()
+ * - sysctl
+ * NetBSD, OpenBSD
+ * foo_sysctl()
+ * BSDI [34]
+ * foo_sysctl() but with different style
+ * FreeBSD 2, FreeBSD 3
+ * linker hack
+ *
+ * - if_ioctl
+ * NetBSD, FreeBSD 3, BSDI [34]
+ * 2nd argument is u_long cmd
+ * FreeBSD 2
+ * 2nd argument is int cmd
+ * - if attach routines
+ * NetBSD
+ * void xxattach(int);
+ * FreeBSD 2, FreeBSD 3
+ * void xxattach(void *);
+ * PSEUDO_SET(xxattach, if_xx);
+ *
+ * - ovbcopy()
+ * in NetBSD 1.4 or later, ovbcopy() is not supplied in the kernel.
+ * bcopy() is safe against overwrites.
+ * - splnet()
+ * NetBSD 1.4 or later requires splsoftnet().
+ * other operating systems use splnet().
+ *
+ * - dtom()
+ * NEVER USE IT!
+ */
+
+#ifndef __NET_NET_OSDEP_H_DEFINED_
+#define __NET_NET_OSDEP_H_DEFINED_
+#ifdef _KERNEL
+
+struct ifnet;
+extern const char *if_name __P((struct ifnet *));
+
+#define HAVE_OLD_BPF
+
+#endif /*_KERNEL*/
+#endif /*__NET_NET_OSDEP_H_DEFINED_ */
diff --git a/sys/net/netisr.h b/sys/net/netisr.h
new file mode 100644
index 0000000..432607c
--- /dev/null
+++ b/sys/net/netisr.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1980, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)netisr.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_NETISR_H_
+#define _NET_NETISR_H_
+
+/*
+ * The networking code runs off software interrupts.
+ *
+ * You can switch into the network by doing splnet() and return by splx().
+ * The software interrupt level for the network is higher than the software
+ * level for the clock (so you can enter the network in routines called
+ * at timeout time).
+ */
+#if defined(vax) || defined(tahoe)
+#define setsoftnet() mtpr(SIRR, 12)
+#endif
+
+/*
+ * Each ``pup-level-1'' input queue has a bit in a ``netisr'' status
+ * word, which is used to de-multiplex a single software interrupt
+ * that schedules calls to the lowest-level input routine of each
+ * protocol.
+ */
+#define NETISR_RAW 0 /* same as AF_UNSPEC */
+#define NETISR_IP 2 /* same as AF_INET */
+#define NETISR_IMP 3 /* same as AF_IMPLINK */
+#define NETISR_NS 6 /* same as AF_NS */
+#define NETISR_ISO 7 /* same as AF_ISO */
+#define NETISR_CCITT 10 /* same as AF_CCITT */
+#define NETISR_ATALK 16 /* same as AF_APPLETALK */
+#define NETISR_ARP 18 /* same as AF_LINK */
+#define NETISR_IPX 23 /* same as AF_IPX */
+#define NETISR_ISDN 26 /* same as AF_E164 */
+#define NETISR_PPP 27 /* PPP soft interrupt */
+#define NETISR_IPV6 28 /* same as AF_INET6 */
+#define NETISR_NATM 29 /* same as AF_NATM */
+#define NETISR_NETGRAPH 31 /* same as AF_NETGRAPH */
+
+#define schednetisr(anisr) { netisr |= 1<<(anisr); setsoftnet(); }
+
+#ifndef LOCORE
+#ifdef KERNEL
+extern volatile unsigned int netisr; /* scheduling bits for network */
+
+typedef void netisr_t __P((void));
+
+struct netisrtab {
+ int nit_num;
+ netisr_t *nit_isr;
+};
+
+int register_netisr __P((int, netisr_t *));
+void netisr_sysinit __P((void *));
+
+#define NETISR_SET(num, isr) \
+ static struct netisrtab nisr_##num = { num, isr }; \
+ SYSINIT(nisr_##num, SI_SUB_CPU, SI_ORDER_ANY, netisr_sysinit, &nisr_##num)
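+
+/*
+ * Illustrative sketch (not part of the original header): a protocol
+ * typically registers its soft-interrupt handler at boot time with
+ * NETISR_SET (or register_netisr()) and schedules it from its input
+ * path with schednetisr().  The handler name below is hypothetical.
+ */
+#if 0
+static void
+fooprotointr(void)
+{
+	/* drain the protocol's input queue at splnet() */
+}
+NETISR_SET(NETISR_IP, fooprotointr);
+/* ...and from a driver's input path: schednetisr(NETISR_IP); */
+#endif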
+#endif
+#endif
+
+#endif
diff --git a/sys/net/pfkeyv2.h b/sys/net/pfkeyv2.h
new file mode 100644
index 0000000..ac509e0
--- /dev/null
+++ b/sys/net/pfkeyv2.h
@@ -0,0 +1,420 @@
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* $Id: keyv2.h,v 1.1.6.1.6.4 1999/06/08 05:33:39 itojun Exp $ */
+
+/*
+ * This file has been derived from RFC 2367, with some of the
+ * SADB_KEY_FLAGS_ flags added as SADB_X_EXT_.
+ * sakane@ydc.co.jp
+ */
+
+#ifndef _NET_PFKEYV2_H_
+#define _NET_PFKEYV2_H_
+
+/*
+This file defines structures and symbols for the PF_KEY Version 2
+key management interface. It was written at the U.S. Naval Research
+Laboratory. This file is in the public domain. The authors ask that
+you leave this credit intact on any copies of this file.
+*/
+#ifndef __PFKEY_V2_H
+#define __PFKEY_V2_H 1
+
+#define PF_KEY_V2 2
+#define PFKEYV2_REVISION 199806L
+
+#define SADB_RESERVED 0
+#define SADB_GETSPI 1
+#define SADB_UPDATE 2
+#define SADB_ADD 3
+#define SADB_DELETE 4
+#define SADB_GET 5
+#define SADB_ACQUIRE 6
+#define SADB_REGISTER 7
+#define SADB_EXPIRE 8
+#define SADB_FLUSH 9
+#define SADB_DUMP 10
+#define SADB_X_PROMISC 11
+#define SADB_X_PCHANGE 12
+
+#define SADB_X_SPDUPDATE 13 /* not yet */
+#define SADB_X_SPDADD 14
+#define SADB_X_SPDDELETE 15
+#define SADB_X_SPDGET 16 /* not yet */
+#define SADB_X_SPDACQUIRE 17 /* not yet */
+#define SADB_X_SPDDUMP 18
+#define SADB_X_SPDFLUSH 19
+#define SADB_MAX 19
+
+struct sadb_msg {
+ u_int8_t sadb_msg_version;
+ u_int8_t sadb_msg_type;
+ u_int8_t sadb_msg_errno;
+ u_int8_t sadb_msg_satype;
+ u_int16_t sadb_msg_len;
+ u_int8_t sadb_msg_mode; /* XXX */
+ u_int8_t sadb_msg_reserved;
+ u_int32_t sadb_msg_seq;
+ u_int32_t sadb_msg_pid;
+};
+
+struct sadb_ext {
+ u_int16_t sadb_ext_len;
+ u_int16_t sadb_ext_type;
+};
+
+struct sadb_sa {
+ u_int16_t sadb_sa_len;
+ u_int16_t sadb_sa_exttype;
+ u_int32_t sadb_sa_spi;
+ u_int8_t sadb_sa_replay;
+ u_int8_t sadb_sa_state;
+ u_int8_t sadb_sa_auth;
+ u_int8_t sadb_sa_encrypt;
+ u_int32_t sadb_sa_flags;
+};
+
+struct sadb_lifetime {
+ u_int16_t sadb_lifetime_len;
+ u_int16_t sadb_lifetime_exttype;
+ u_int32_t sadb_lifetime_allocations;
+ u_int64_t sadb_lifetime_bytes;
+ u_int64_t sadb_lifetime_addtime;
+ u_int64_t sadb_lifetime_usetime;
+};
+
+struct sadb_address {
+ u_int16_t sadb_address_len;
+ u_int16_t sadb_address_exttype;
+ u_int8_t sadb_address_proto;
+ u_int8_t sadb_address_prefixlen;
+ u_int16_t sadb_address_reserved;
+};
+
+struct sadb_key {
+ u_int16_t sadb_key_len;
+ u_int16_t sadb_key_exttype;
+ u_int16_t sadb_key_bits;
+ u_int16_t sadb_key_reserved;
+};
+
+struct sadb_ident {
+ u_int16_t sadb_ident_len;
+ u_int16_t sadb_ident_exttype;
+ u_int16_t sadb_ident_type;
+ u_int16_t sadb_ident_reserved;
+ u_int64_t sadb_ident_id;
+};
+/* used to subdivide sadb_ident.sadb_ident_id */
+union sadb_x_ident_id {
+ u_int64_t sadb_x_ident_id;
+ struct _sadb_x_ident_id_addr {
+ u_int16_t prefix;
+ u_int16_t ul_proto;
+ u_int32_t reserved;
+ } sadb_x_ident_id_addr;
+};
+
+struct sadb_sens {
+ u_int16_t sadb_sens_len;
+ u_int16_t sadb_sens_exttype;
+ u_int32_t sadb_sens_dpd;
+ u_int8_t sadb_sens_sens_level;
+ u_int8_t sadb_sens_sens_len;
+ u_int8_t sadb_sens_integ_level;
+ u_int8_t sadb_sens_integ_len;
+ u_int32_t sadb_sens_reserved;
+};
+
+struct sadb_prop {
+ u_int16_t sadb_prop_len;
+ u_int16_t sadb_prop_exttype;
+ u_int8_t sadb_prop_replay;
+ u_int8_t sadb_prop_reserved[3];
+};
+
+struct sadb_comb {
+ u_int8_t sadb_comb_auth;
+ u_int8_t sadb_comb_encrypt;
+ u_int16_t sadb_comb_flags;
+ u_int16_t sadb_comb_auth_minbits;
+ u_int16_t sadb_comb_auth_maxbits;
+ u_int16_t sadb_comb_encrypt_minbits;
+ u_int16_t sadb_comb_encrypt_maxbits;
+ u_int32_t sadb_comb_reserved;
+ u_int32_t sadb_comb_soft_allocations;
+ u_int32_t sadb_comb_hard_allocations;
+ u_int64_t sadb_comb_soft_bytes;
+ u_int64_t sadb_comb_hard_bytes;
+ u_int64_t sadb_comb_soft_addtime;
+ u_int64_t sadb_comb_hard_addtime;
+ u_int64_t sadb_comb_soft_usetime;
+ u_int64_t sadb_comb_hard_usetime;
+};
+
+struct sadb_supported {
+ u_int16_t sadb_supported_len;
+ u_int16_t sadb_supported_exttype;
+ u_int32_t sadb_supported_reserved;
+};
+
+struct sadb_alg {
+ u_int8_t sadb_alg_id;
+ u_int8_t sadb_alg_ivlen;
+ u_int16_t sadb_alg_minbits;
+ u_int16_t sadb_alg_maxbits;
+ u_int16_t sadb_alg_reserved;
+};
+
+struct sadb_spirange {
+ u_int16_t sadb_spirange_len;
+ u_int16_t sadb_spirange_exttype;
+ u_int32_t sadb_spirange_min;
+ u_int32_t sadb_spirange_max;
+ u_int32_t sadb_spirange_reserved;
+};
+
+struct sadb_x_kmprivate {
+ u_int16_t sadb_x_kmprivate_len;
+ u_int16_t sadb_x_kmprivate_exttype;
+ u_int32_t sadb_x_kmprivate_reserved;
+};
+
+/* XXX Policy Extension */
+/* sizeof(struct sadb_x_policy) == 8 */
+struct sadb_x_policy {
+ u_int16_t sadb_x_policy_len;
+ u_int16_t sadb_x_policy_exttype;
+ /* See policy type of ipsec.h */
+ u_int16_t sadb_x_policy_type;
+ u_int8_t sadb_x_policy_dir; /* direction, see ipsec.h */
+ u_int8_t sadb_x_policy_reserved;
+};
+/*
+ * When policy_type == IPSEC, it is followed by one or more
+ * ipsec policy requests.
+ * [total length of ipsec policy requests]
+ * = (sadb_x_policy_len * sizeof(uint64_t) - sizeof(struct sadb_x_policy))
+ */
+
+/* XXX IPsec Policy Request Extension */
+/*
+ * This structure is aligned 8 bytes.
+ */
+struct sadb_x_ipsecrequest {
+ u_int16_t sadb_x_ipsecrequest_len;
+	/* structure length aligned to 8 bytes.
+	 * This value is the true length in bytes,
+	 * not in units of 64 bits. */
+ u_int16_t sadb_x_ipsecrequest_proto; /* See ipsec.h */
+ /* See ipsec.h. Not SADB_SATYPE_XX */
+ u_int16_t sadb_x_ipsecrequest_mode;
+ u_int16_t sadb_x_ipsecrequest_level; /* See ipsec.h */
+
+	/*
+	 * This structure is followed by the source IP address of the SA,
+	 * and then immediately by the destination IP address of the SA.
+	 * They are encoded as two sockaddr structures without any padding;
+	 * each sa_len must be set exactly.  The individual sockaddr lengths
+	 * are not aligned to 64 bits, but the sum of this structure and the
+	 * addresses is.
+	 */
+};
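+/*
+ * Illustrative sketch (not part of the original header): locating the two
+ * sockaddrs that follow a request structure, as described in the comment
+ * above.  The helper and its parameter names are hypothetical.
+ */
+#if 0
+static void
+example_ipsecrequest_addrs(struct sadb_x_ipsecrequest *req,
+	struct sockaddr **srcp, struct sockaddr **dstp)
+{
+	/* the source address starts right after the fixed structure */
+	*srcp = (struct sockaddr *)(req + 1);
+	/* the destination address follows it with no padding */
+	*dstp = (struct sockaddr *)((caddr_t)*srcp + (*srcp)->sa_len);
+}
+#endif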
+
+#define SADB_EXT_RESERVED 0
+#define SADB_EXT_SA 1
+#define SADB_EXT_LIFETIME_CURRENT 2
+#define SADB_EXT_LIFETIME_HARD 3
+#define SADB_EXT_LIFETIME_SOFT 4
+#define SADB_EXT_ADDRESS_SRC 5
+#define SADB_EXT_ADDRESS_DST 6
+#define SADB_EXT_ADDRESS_PROXY 7
+#define SADB_EXT_KEY_AUTH 8
+#define SADB_EXT_KEY_ENCRYPT 9
+#define SADB_EXT_IDENTITY_SRC 10
+#define SADB_EXT_IDENTITY_DST 11
+#define SADB_EXT_SENSITIVITY 12
+#define SADB_EXT_PROPOSAL 13
+#define SADB_EXT_SUPPORTED_AUTH 14
+#define SADB_EXT_SUPPORTED_ENCRYPT 15
+#define SADB_EXT_SPIRANGE 16
+#define SADB_X_EXT_KMPRIVATE 17
+#define SADB_X_EXT_POLICY 18
+#define SADB_EXT_MAX 18
+
+#define SADB_SATYPE_UNSPEC 0
+#define SADB_SATYPE_AH 2
+#define SADB_SATYPE_ESP 3
+#define SADB_SATYPE_RSVP 5
+#define SADB_SATYPE_OSPFV2 6
+#define SADB_SATYPE_RIPV2 7
+#define SADB_SATYPE_MIP 8
+#define SADB_X_SATYPE_IPCOMP 9
+#define SADB_SATYPE_MAX 9
+
+#define SADB_SASTATE_LARVAL 0
+#define SADB_SASTATE_MATURE 1
+#define SADB_SASTATE_DYING 2
+#define SADB_SASTATE_DEAD 3
+#define SADB_SASTATE_MAX 3
+#define SADB_SAFLAGS_PFS 1
+
+#define SADB_AALG_NONE 0
+#define SADB_AALG_MD5HMAC 1 /* 2 */
+#define SADB_AALG_SHA1HMAC 2 /* 3 */
+#define SADB_AALG_MD5 3 /* Keyed MD5 */
+#define SADB_AALG_SHA 4 /* Keyed SHA */
+#define SADB_AALG_NULL 5 /* null authentication */
+#define SADB_AALG_MAX 6
+
+#define SADB_EALG_NONE 0
+#define SADB_EALG_DESCBC 1 /* 2 */
+#define SADB_EALG_3DESCBC 2 /* 3 */
+#define SADB_EALG_NULL 3 /* 11 */
+#define SADB_EALG_BLOWFISHCBC 4
+#define SADB_EALG_CAST128CBC 5
+#define SADB_EALG_RC5CBC 6
+#define SADB_EALG_MAX 7
+
+/* nonstandard */
+#define SADB_X_CALG_NONE 0
+#define SADB_X_CALG_OUI 1
+#define SADB_X_CALG_DEFLATE 2
+#define SADB_X_CALG_LZS 3
+
+#define SADB_IDENTTYPE_RESERVED 0
+#define SADB_IDENTTYPE_PREFIX 1
+#define SADB_IDENTTYPE_FQDN 2
+#define SADB_IDENTTYPE_USERFQDN 3
+#define SADB_X_IDENTTYPE_ADDR 4
+#define SADB_IDENTTYPE_MAX 4
+
+/* `flags' in the sadb_sa structure holds the following */
+#define SADB_X_EXT_NONE 0x0000 /* i.e. new format. */
+#define SADB_X_EXT_OLD 0x0001 /* old format. */
+
+#define SADB_X_EXT_IV4B 0x0010 /* IV length of 4 bytes in use */
+#define SADB_X_EXT_DERIV 0x0020 /* DES derived */
+#define SADB_X_EXT_CYCSEQ 0x0040 /* allow cyclic sequence numbers. */
+
+ /* the following three are mutually exclusive flags */
+#define SADB_X_EXT_PSEQ 0x0000 /* sequential padding for ESP */
+#define SADB_X_EXT_PRAND 0x0100 /* random padding for ESP */
+#define SADB_X_EXT_PZERO 0x0200 /* zero padding for ESP */
+#define SADB_X_EXT_PMASK 0x0300 /* mask for padding flag */
+
+#define SADB_X_EXT_RAWCPI 0x0080 /* use well known CPI (IPComp) */
+
+#define SADB_KEY_FLAGS_MAX 0x0fff
+
+/* SPI size for PF_KEYv2 */
+#define PFKEY_SPI_SIZE sizeof(u_int32_t)
+
+/* Identifier for member of lifetime structure */
+#define SADB_X_LIFETIME_ALLOCATIONS 0
+#define SADB_X_LIFETIME_BYTES 1
+#define SADB_X_LIFETIME_ADDTIME 2
+#define SADB_X_LIFETIME_USETIME 3
+
+/* The ratio of the SOFT lifetime to the HARD one, in percent. */
+#define PFKEY_SOFT_LIFETIME_RATE 80
+
+/* Utilities */
+#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
+#define PFKEY_EXTLEN(msg) \
+ PFKEY_UNUNIT64(((struct sadb_ext *)(msg))->sadb_ext_len)
+#define PFKEY_ADDR_PREFIX(ext) \
+ (((struct sadb_address *)(ext))->sadb_address_prefixlen)
+#define PFKEY_ADDR_PROTO(ext) \
+ (((struct sadb_address *)(ext))->sadb_address_proto)
+#define PFKEY_ADDR_SADDR(ext) \
+ ((struct sockaddr *)((caddr_t)(ext) + sizeof(struct sadb_address)))
+
+/* in 64bits */
+#define PFKEY_UNUNIT64(a) ((a) << 3)
+#define PFKEY_UNIT64(a) ((a) >> 3)
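+
+/*
+ * Illustrative sketch (not part of the original header): lengths on a
+ * PF_KEY socket are expressed in 64-bit units, so a parser converts them
+ * with the macros above when walking a received message.  The helper
+ * below is hypothetical.
+ */
+#if 0
+static void
+example_walk_extensions(struct sadb_msg *msg)
+{
+	caddr_t p = (caddr_t)(msg + 1);
+	caddr_t ep = (caddr_t)msg + PFKEY_UNUNIT64(msg->sadb_msg_len);
+
+	while (p < ep) {
+		struct sadb_ext *ext = (struct sadb_ext *)p;
+
+		/* sadb_ext_len is in 64-bit units; PFKEY_EXTLEN() yields bytes */
+		p += PFKEY_EXTLEN(ext);
+	}
+}
+#endif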
+
+#ifndef KERNEL
+extern void pfkey_sadump(struct sadb_msg *m);
+extern void pfkey_spdump(struct sadb_msg *m);
+
+struct sockaddr;
+int ipsec_check_keylen __P((u_int supported, u_int alg_id, u_int keylen));
+u_int pfkey_set_softrate __P((u_int type, u_int rate));
+u_int pfkey_get_softrate __P((u_int type));
+int pfkey_send_getspi __P((int so, u_int satype, u_int mode,
+ struct sockaddr *src, struct sockaddr *dst,
+ u_int32_t min, u_int32_t max, u_int32_t seq));
+int pfkey_send_update __P((int so, u_int satype, u_int mode,
+ struct sockaddr *src, struct sockaddr *dst,
+ u_int32_t spi, u_int wsize, caddr_t keymat,
+ u_int e_type, u_int e_keylen, u_int a_type,
+ u_int a_keylen, u_int flags, u_int32_t l_alloc,
+ u_int64_t l_bytes, u_int64_t l_addtime,
+ u_int64_t l_usetime, u_int32_t seq));
+int pfkey_send_add __P((int so, u_int satype, u_int mode,
+ struct sockaddr *src, struct sockaddr *dst,
+ u_int32_t spi, u_int wsize, caddr_t keymat,
+ u_int e_type, u_int e_keylen, u_int a_type,
+ u_int a_keylen, u_int flags, u_int32_t l_alloc,
+ u_int64_t l_bytes, u_int64_t l_addtime,
+ u_int64_t l_usetime, u_int32_t seq));
+int pfkey_send_delete __P((int so, u_int satype, u_int mode,
+ struct sockaddr *src, struct sockaddr *dst,
+ u_int32_t spi));
+int pfkey_send_get __P((int so, u_int satype, u_int mode,
+ struct sockaddr *src, struct sockaddr *dst,
+ u_int32_t spi));
+int pfkey_send_register __P((int so, u_int satype));
+int pfkey_recv_register __P((int so));
+int pfkey_send_flush __P((int so, u_int satype));
+int pfkey_send_dump __P((int so, u_int satype));
+int pfkey_send_promisc_toggle __P((int so, int flag));
+int pfkey_send_spdadd __P((int so, struct sockaddr *src, u_int prefs,
+ struct sockaddr *dst, u_int prefd, u_int proto,
+ caddr_t policy, int policylen, u_int32_t seq));
+int pfkey_send_spddelete __P((int so, struct sockaddr *src, u_int prefs,
+ struct sockaddr *dst, u_int prefd, u_int proto, u_int32_t seq));
+int pfkey_send_spdflush __P((int so));
+int pfkey_send_spddump __P((int so));
+
+int pfkey_open __P((void));
+void pfkey_close __P((int so));
+struct sadb_msg *pfkey_recv __P((int so));
+int pfkey_send __P((int so, struct sadb_msg *msg, int len));
+int pfkey_align __P((struct sadb_msg *msg, caddr_t *mhp));
+int pfkey_check __P((caddr_t *mhp));
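+
+/*
+ * Illustrative sketch (not part of the original header): a user-level key
+ * management program would use the helpers declared above roughly as
+ * follows.  Exact behaviour is defined by the library that implements
+ * these prototypes; error handling is omitted.
+ */
+#if 0
+static void
+example_pfkey_register(void)
+{
+	int so = pfkey_open();
+
+	if (so < 0)
+		return;
+	pfkey_send_register(so, SADB_SATYPE_ESP);
+	pfkey_recv_register(so);	/* reports supported algorithms */
+	pfkey_close(so);
+}
+#endif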
+
+#endif /*!KERNEL*/
+
+#endif /* __PFKEY_V2_H */
+
+#endif /* _NET_PFKEYV2_H_ */
diff --git a/sys/net/ppp_comp.h b/sys/net/ppp_comp.h
new file mode 100644
index 0000000..b777bfc
--- /dev/null
+++ b/sys/net/ppp_comp.h
@@ -0,0 +1,165 @@
+/*
+ * ppp_comp.h - Definitions for doing PPP packet compression.
+ *
+ * Copyright (c) 1994 The Australian National University.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies. This software is provided without any
+ * warranty, express or implied. The Australian National University
+ * makes no representations about the suitability of this software for
+ * any purpose.
+ *
+ * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
+ * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
+ * OR MODIFICATIONS.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_PPP_COMP_H
+#define _NET_PPP_COMP_H
+
+/*
+ * The following symbols control whether we include code for
+ * various compression methods.
+ */
+#ifndef DO_BSD_COMPRESS
+#define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */
+#endif
+#ifndef DO_DEFLATE
+#define DO_DEFLATE 1 /* by default, include Deflate */
+#endif
+#define DO_PREDICTOR_1 0
+#define DO_PREDICTOR_2 0
+
+/*
+ * Structure giving methods for compression/decompression.
+ */
+#ifdef PACKETPTR
+struct compressor {
+ int compress_proto; /* CCP compression protocol number */
+
+ /* Allocate space for a compressor (transmit side) */
+ void *(*comp_alloc) __P((u_char *options, int opt_len));
+ /* Free space used by a compressor */
+ void (*comp_free) __P((void *state));
+ /* Initialize a compressor */
+ int (*comp_init) __P((void *state, u_char *options, int opt_len,
+ int unit, int hdrlen, int debug));
+ /* Reset a compressor */
+ void (*comp_reset) __P((void *state));
+ /* Compress a packet */
+ int (*compress) __P((void *state, PACKETPTR *mret,
+ PACKETPTR mp, int orig_len, int max_len));
+ /* Return compression statistics */
+ void (*comp_stat) __P((void *state, struct compstat *stats));
+
+ /* Allocate space for a decompressor (receive side) */
+ void *(*decomp_alloc) __P((u_char *options, int opt_len));
+ /* Free space used by a decompressor */
+ void (*decomp_free) __P((void *state));
+ /* Initialize a decompressor */
+ int (*decomp_init) __P((void *state, u_char *options, int opt_len,
+ int unit, int hdrlen, int mru, int debug));
+ /* Reset a decompressor */
+ void (*decomp_reset) __P((void *state));
+ /* Decompress a packet. */
+ int (*decompress) __P((void *state, PACKETPTR mp,
+ PACKETPTR *dmpp));
+ /* Update state for an incompressible packet received */
+ void (*incomp) __P((void *state, PACKETPTR mp));
+ /* Return decompression statistics */
+ void (*decomp_stat) __P((void *state, struct compstat *stats));
+};
+#endif /* PACKETPTR */
+
+/*
+ * Return values for decompress routine.
+ * We need to make these distinctions so that we can disable certain
+ * useful functionality, namely sending a CCP reset-request as a result
+ * of an error detected after decompression. This is to avoid infringing
+ * a patent held by Motorola.
+ * Don't you just lurve software patents.
+ */
+#define DECOMP_OK 0 /* everything went OK */
+#define DECOMP_ERROR 1 /* error detected before decomp. */
+#define DECOMP_FATALERROR 2 /* error detected after decomp. */
+
+/*
+ * CCP codes.
+ */
+#define CCP_CONFREQ 1
+#define CCP_CONFACK 2
+#define CCP_TERMREQ 5
+#define CCP_TERMACK 6
+#define CCP_RESETREQ 14
+#define CCP_RESETACK 15
+
+/*
+ * Max # bytes for a CCP option
+ */
+#define CCP_MAX_OPTION_LENGTH 32
+
+/*
+ * Parts of a CCP packet.
+ */
+#define CCP_CODE(dp) ((dp)[0])
+#define CCP_ID(dp) ((dp)[1])
+#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3])
+#define CCP_HDRLEN 4
+
+#define CCP_OPT_CODE(dp) ((dp)[0])
+#define CCP_OPT_LENGTH(dp) ((dp)[1])
+#define CCP_OPT_MINLEN 2
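+
+/*
+ * Illustrative sketch (not part of the original header): decoding the
+ * fixed CCP header with the accessors above.  The helper, its buffer
+ * "dp", and the length check are hypothetical.
+ */
+#if 0
+static void
+example_ccp_dispatch(u_char *dp, int len)
+{
+	/* dp points at the CCP code field */
+	if (len < CCP_HDRLEN || CCP_LENGTH(dp) > len)
+		return;		/* malformed */
+	switch (CCP_CODE(dp)) {
+	case CCP_CONFREQ:
+		/* options start at dp + CCP_HDRLEN, each at least
+		   CCP_OPT_MINLEN octets long */
+		break;
+	case CCP_RESETREQ:
+		/* peer asks us to reset the transmit compressor */
+		break;
+	}
+}
+#endif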
+
+/*
+ * Definitions for BSD-Compress.
+ */
+#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */
+#define CILEN_BSD_COMPRESS 3 /* length of config. option */
+
+/* Macros for handling the 3rd byte of the BSD-Compress config option. */
+#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */
+#define BSD_VERSION(x) ((x) >> 5) /* version of option format */
+#define BSD_CURRENT_VERSION 1 /* current version number */
+#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n))
+
+#define BSD_MIN_BITS 9 /* smallest code size supported */
+#define BSD_MAX_BITS 15 /* largest code size supported */
+
+/*
+ * Definitions for Deflate.
+ */
+#define CI_DEFLATE 26 /* config option for Deflate */
+#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */
+#define CILEN_DEFLATE 4 /* length of its config option */
+
+#define DEFLATE_MIN_SIZE 8
+#define DEFLATE_MAX_SIZE 15
+#define DEFLATE_METHOD_VAL 8
+#define DEFLATE_SIZE(x) (((x) >> 4) + DEFLATE_MIN_SIZE)
+#define DEFLATE_METHOD(x) ((x) & 0x0F)
+#define DEFLATE_MAKE_OPT(w) ((((w) - DEFLATE_MIN_SIZE) << 4) \
+ + DEFLATE_METHOD_VAL)
+#define DEFLATE_CHK_SEQUENCE 0
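+
+/*
+ * Illustrative sketch (not part of the original header): DEFLATE_MAKE_OPT()
+ * and DEFLATE_SIZE()/DEFLATE_METHOD() are inverses of each other, e.g. for
+ * a 15-bit window the third option byte is built and decoded as below.
+ */
+#if 0
+static void
+example_deflate_opt(void)
+{
+	u_char w = DEFLATE_MAKE_OPT(15);	/* ((15 - 8) << 4) + 8 == 0x78 */
+	int size = DEFLATE_SIZE(w);		/* (0x78 >> 4) + 8 == 15 */
+	int method = DEFLATE_METHOD(w);		/* 0x78 & 0x0f == 8 */
+}
+#endif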
+
+/*
+ * Definitions for other, as yet unsupported, compression methods.
+ */
+#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */
+#define CILEN_PREDICTOR_1 2 /* length of its config option */
+#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */
+#define CILEN_PREDICTOR_2 2 /* length of its config option */
+
+#endif /* _NET_PPP_COMP_H */
diff --git a/sys/net/ppp_deflate.c b/sys/net/ppp_deflate.c
new file mode 100644
index 0000000..fdc0c64
--- /dev/null
+++ b/sys/net/ppp_deflate.c
@@ -0,0 +1,680 @@
+/* $FreeBSD$ */
+
+/*
+ * ppp_deflate.c - interface the zlib procedures for Deflate compression
+ * and decompression (as used by gzip) to the PPP code.
+ * This version is for use with mbufs on BSD-derived systems.
+ *
+ * Copyright (c) 1994 The Australian National University.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies. This software is provided without any
+ * warranty, express or implied. The Australian National University
+ * makes no representations about the suitability of this software for
+ * any purpose.
+ *
+ * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
+ * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
+ * OR MODIFICATIONS.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <net/ppp_defs.h>
+#include <net/zlib.h>
+
+#define PACKETPTR struct mbuf *
+#include <net/ppp_comp.h>
+
+#if DO_DEFLATE
+
+#define DEFLATE_DEBUG 1
+
+/*
+ * State for a Deflate (de)compressor.
+ */
+struct deflate_state {
+ int seqno;
+ int w_size;
+ int unit;
+ int hdrlen;
+ int mru;
+ int debug;
+ z_stream strm;
+ struct compstat stats;
+};
+
+#define DEFLATE_OVHD 2 /* Deflate overhead/packet */
+
+static void *z_alloc __P((void *, u_int items, u_int size));
+static void z_free __P((void *, void *ptr));
+static void *z_comp_alloc __P((u_char *options, int opt_len));
+static void *z_decomp_alloc __P((u_char *options, int opt_len));
+static void z_comp_free __P((void *state));
+static void z_decomp_free __P((void *state));
+static int z_comp_init __P((void *state, u_char *options, int opt_len,
+ int unit, int hdrlen, int debug));
+static int z_decomp_init __P((void *state, u_char *options, int opt_len,
+ int unit, int hdrlen, int mru, int debug));
+static int z_compress __P((void *state, struct mbuf **mret,
+ struct mbuf *mp, int slen, int maxolen));
+static void z_incomp __P((void *state, struct mbuf *dmsg));
+static int z_decompress __P((void *state, struct mbuf *cmp,
+ struct mbuf **dmpp));
+static void z_comp_reset __P((void *state));
+static void z_decomp_reset __P((void *state));
+static void z_comp_stats __P((void *state, struct compstat *stats));
+
+/*
+ * Procedures exported to if_ppp.c.
+ */
+struct compressor ppp_deflate = {
+ CI_DEFLATE, /* compress_proto */
+ z_comp_alloc, /* comp_alloc */
+ z_comp_free, /* comp_free */
+ z_comp_init, /* comp_init */
+ z_comp_reset, /* comp_reset */
+ z_compress, /* compress */
+ z_comp_stats, /* comp_stat */
+ z_decomp_alloc, /* decomp_alloc */
+ z_decomp_free, /* decomp_free */
+ z_decomp_init, /* decomp_init */
+ z_decomp_reset, /* decomp_reset */
+ z_decompress, /* decompress */
+ z_incomp, /* incomp */
+ z_comp_stats, /* decomp_stat */
+};
+
+struct compressor ppp_deflate_draft = {
+ CI_DEFLATE_DRAFT, /* compress_proto */
+ z_comp_alloc, /* comp_alloc */
+ z_comp_free, /* comp_free */
+ z_comp_init, /* comp_init */
+ z_comp_reset, /* comp_reset */
+ z_compress, /* compress */
+ z_comp_stats, /* comp_stat */
+ z_decomp_alloc, /* decomp_alloc */
+ z_decomp_free, /* decomp_free */
+ z_decomp_init, /* decomp_init */
+ z_decomp_reset, /* decomp_reset */
+ z_decompress, /* decompress */
+ z_incomp, /* incomp */
+ z_comp_stats, /* decomp_stat */
+};
+
+/*
+ * Space allocation and freeing routines for use by zlib routines.
+ */
+void *
+z_alloc(notused, items, size)
+ void *notused;
+ u_int items, size;
+{
+ void *ptr;
+
+ MALLOC(ptr, void *, items * size, M_DEVBUF, M_NOWAIT);
+ return ptr;
+}
+
+void
+z_free(notused, ptr)
+ void *notused;
+ void *ptr;
+{
+ FREE(ptr, M_DEVBUF);
+}
+
+/*
+ * Allocate space for a compressor.
+ */
+static void *
+z_comp_alloc(options, opt_len)
+ u_char *options;
+ int opt_len;
+{
+ struct deflate_state *state;
+ int w_size;
+
+ if (opt_len != CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return NULL;
+ w_size = DEFLATE_SIZE(options[2]);
+ if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
+ return NULL;
+
+ MALLOC(state, struct deflate_state *, sizeof(struct deflate_state),
+ M_DEVBUF, M_NOWAIT);
+ if (state == NULL)
+ return NULL;
+
+ state->strm.next_in = NULL;
+ state->strm.zalloc = z_alloc;
+ state->strm.zfree = z_free;
+ if (deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION, DEFLATE_METHOD_VAL,
+ -w_size, 8, Z_DEFAULT_STRATEGY) != Z_OK) {
+ FREE(state, M_DEVBUF);
+ return NULL;
+ }
+
+ state->w_size = w_size;
+ bzero(&state->stats, sizeof(state->stats));
+ return (void *) state;
+}
+
+static void
+z_comp_free(arg)
+ void *arg;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+
+ deflateEnd(&state->strm);
+ FREE(state, M_DEVBUF);
+}
+
+static int
+z_comp_init(arg, options, opt_len, unit, hdrlen, debug)
+ void *arg;
+ u_char *options;
+ int opt_len, unit, hdrlen, debug;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+
+ if (opt_len < CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || DEFLATE_SIZE(options[2]) != state->w_size
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return 0;
+
+ state->seqno = 0;
+ state->unit = unit;
+ state->hdrlen = hdrlen;
+ state->debug = debug;
+
+ deflateReset(&state->strm);
+
+ return 1;
+}
+
+static void
+z_comp_reset(arg)
+ void *arg;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+
+ state->seqno = 0;
+ deflateReset(&state->strm);
+}
+
+int
+z_compress(arg, mret, mp, orig_len, maxolen)
+ void *arg;
+ struct mbuf **mret; /* compressed packet (out) */
+ struct mbuf *mp; /* uncompressed packet (in) */
+ int orig_len, maxolen;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+ u_char *rptr, *wptr;
+ int proto, olen, wspace, r, flush;
+ struct mbuf *m;
+
+ /*
+ * Check that the protocol is in the range we handle.
+ */
+ rptr = mtod(mp, u_char *);
+ proto = PPP_PROTOCOL(rptr);
+ if (proto > 0x3fff || proto == 0xfd || proto == 0xfb) {
+ *mret = NULL;
+ return orig_len;
+ }
+
+ /* Allocate one mbuf initially. */
+ if (maxolen > orig_len)
+ maxolen = orig_len;
+ MGET(m, M_DONTWAIT, MT_DATA);
+ *mret = m;
+ if (m != NULL) {
+ m->m_len = 0;
+ if (maxolen + state->hdrlen > MLEN)
+ MCLGET(m, M_DONTWAIT);
+ wspace = M_TRAILINGSPACE(m);
+ if (state->hdrlen + PPP_HDRLEN + 2 < wspace) {
+ m->m_data += state->hdrlen;
+ wspace -= state->hdrlen;
+ }
+ wptr = mtod(m, u_char *);
+
+ /*
+ * Copy over the PPP header and store the 2-byte sequence number.
+ */
+ wptr[0] = PPP_ADDRESS(rptr);
+ wptr[1] = PPP_CONTROL(rptr);
+ wptr[2] = PPP_COMP >> 8;
+ wptr[3] = PPP_COMP;
+ wptr += PPP_HDRLEN;
+ wptr[0] = state->seqno >> 8;
+ wptr[1] = state->seqno;
+ wptr += 2;
+ state->strm.next_out = wptr;
+ state->strm.avail_out = wspace - (PPP_HDRLEN + 2);
+ } else {
+ state->strm.next_out = NULL;
+ state->strm.avail_out = 1000000;
+ wptr = NULL;
+ wspace = 0;
+ }
+ ++state->seqno;
+
+ rptr += (proto > 0xff)? 2: 3; /* skip 1st proto byte if 0 */
+ state->strm.next_in = rptr;
+ state->strm.avail_in = mtod(mp, u_char *) + mp->m_len - rptr;
+ mp = mp->m_next;
+ flush = (mp == NULL)? Z_PACKET_FLUSH: Z_NO_FLUSH;
+ olen = 0;
+ for (;;) {
+ r = deflate(&state->strm, flush);
+ if (r != Z_OK) {
+ printf("z_compress: deflate returned %d (%s)\n",
+ r, (state->strm.msg? state->strm.msg: ""));
+ break;
+ }
+ if (flush != Z_NO_FLUSH && state->strm.avail_out != 0)
+ break; /* all done */
+ if (state->strm.avail_in == 0 && mp != NULL) {
+ state->strm.next_in = mtod(mp, u_char *);
+ state->strm.avail_in = mp->m_len;
+ mp = mp->m_next;
+ if (mp == NULL)
+ flush = Z_PACKET_FLUSH;
+ }
+ if (state->strm.avail_out == 0) {
+ if (m != NULL) {
+ m->m_len = wspace;
+ olen += wspace;
+ MGET(m->m_next, M_DONTWAIT, MT_DATA);
+ m = m->m_next;
+ if (m != NULL) {
+ m->m_len = 0;
+ if (maxolen - olen > MLEN)
+ MCLGET(m, M_DONTWAIT);
+ state->strm.next_out = mtod(m, u_char *);
+ state->strm.avail_out = wspace = M_TRAILINGSPACE(m);
+ }
+ }
+ if (m == NULL) {
+ state->strm.next_out = NULL;
+ state->strm.avail_out = 1000000;
+ }
+ }
+ }
+ if (m != NULL)
+ olen += (m->m_len = wspace - state->strm.avail_out);
+
+ /*
+ * See if we managed to reduce the size of the packet.
+ */
+ if (m != NULL && olen < orig_len) {
+ state->stats.comp_bytes += olen;
+ state->stats.comp_packets++;
+ } else {
+ if (*mret != NULL) {
+ m_freem(*mret);
+ *mret = NULL;
+ }
+ state->stats.inc_bytes += orig_len;
+ state->stats.inc_packets++;
+ olen = orig_len;
+ }
+ state->stats.unc_bytes += orig_len;
+ state->stats.unc_packets++;
+
+ return olen;
+}
+
+static void
+z_comp_stats(arg, stats)
+ void *arg;
+ struct compstat *stats;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+ u_int out;
+
+ *stats = state->stats;
+ stats->ratio = stats->unc_bytes;
+ out = stats->comp_bytes + stats->inc_bytes;
+ if (stats->ratio <= 0x7ffffff)
+ stats->ratio <<= 8;
+ else
+ out >>= 8;
+ if (out != 0)
+ stats->ratio /= out;
+}
+
+/*
+ * Allocate space for a decompressor.
+ */
+static void *
+z_decomp_alloc(options, opt_len)
+ u_char *options;
+ int opt_len;
+{
+ struct deflate_state *state;
+ int w_size;
+
+ if (opt_len != CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return NULL;
+ w_size = DEFLATE_SIZE(options[2]);
+ if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
+ return NULL;
+
+ MALLOC(state, struct deflate_state *, sizeof(struct deflate_state),
+ M_DEVBUF, M_NOWAIT);
+ if (state == NULL)
+ return NULL;
+
+ state->strm.next_out = NULL;
+ state->strm.zalloc = z_alloc;
+ state->strm.zfree = z_free;
+ if (inflateInit2(&state->strm, -w_size) != Z_OK) {
+ FREE(state, M_DEVBUF);
+ return NULL;
+ }
+
+ state->w_size = w_size;
+ bzero(&state->stats, sizeof(state->stats));
+ return (void *) state;
+}
+
+static void
+z_decomp_free(arg)
+ void *arg;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+
+ inflateEnd(&state->strm);
+ FREE(state, M_DEVBUF);
+}
+
+static int
+z_decomp_init(arg, options, opt_len, unit, hdrlen, mru, debug)
+ void *arg;
+ u_char *options;
+ int opt_len, unit, hdrlen, mru, debug;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+
+ if (opt_len < CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || DEFLATE_SIZE(options[2]) != state->w_size
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return 0;
+
+ state->seqno = 0;
+ state->unit = unit;
+ state->hdrlen = hdrlen;
+ state->debug = debug;
+ state->mru = mru;
+
+ inflateReset(&state->strm);
+
+ return 1;
+}
+
+static void
+z_decomp_reset(arg)
+ void *arg;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+
+ state->seqno = 0;
+ inflateReset(&state->strm);
+}
+
+/*
+ * Decompress a Deflate-compressed packet.
+ *
+ * Because of patent problems, we return DECOMP_ERROR for errors
+ * found by inspecting the input data and for system problems, but
+ * DECOMP_FATALERROR for any errors which could possibly be said to
+ * be being detected "after" decompression. For DECOMP_ERROR,
+ * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
+ * infringing a patent of Motorola's if we do, so we take CCP down
+ * instead.
+ *
+ * Given that the frame has the correct sequence number and a good FCS,
+ * errors such as invalid codes in the input most likely indicate a
+ * bug, so we return DECOMP_FATALERROR for them in order to turn off
+ * compression, even though they are detected by inspecting the input.
+ */
+int
+z_decompress(arg, mi, mop)
+ void *arg;
+ struct mbuf *mi, **mop;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+ struct mbuf *mo, *mo_head;
+ u_char *rptr, *wptr;
+ int rlen, olen, ospace;
+ int seq, i, flush, r, decode_proto;
+ u_char hdr[PPP_HDRLEN + DEFLATE_OVHD];
+
+ *mop = NULL;
+ rptr = mtod(mi, u_char *);
+ rlen = mi->m_len;
+ for (i = 0; i < PPP_HDRLEN + DEFLATE_OVHD; ++i) {
+ while (rlen <= 0) {
+ mi = mi->m_next;
+ if (mi == NULL)
+ return DECOMP_ERROR;
+ rptr = mtod(mi, u_char *);
+ rlen = mi->m_len;
+ }
+ hdr[i] = *rptr++;
+ --rlen;
+ }
+
+ /* Check the sequence number. */
+ seq = (hdr[PPP_HDRLEN] << 8) + hdr[PPP_HDRLEN+1];
+ if (seq != state->seqno) {
+ if (state->debug)
+ printf("z_decompress%d: bad seq # %d, expected %d\n",
+ state->unit, seq, state->seqno);
+ return DECOMP_ERROR;
+ }
+ ++state->seqno;
+
+ /* Allocate an output mbuf. */
+ MGETHDR(mo, M_DONTWAIT, MT_DATA);
+ if (mo == NULL)
+ return DECOMP_ERROR;
+ mo_head = mo;
+ mo->m_len = 0;
+ mo->m_next = NULL;
+ MCLGET(mo, M_DONTWAIT);
+ ospace = M_TRAILINGSPACE(mo);
+ if (state->hdrlen + PPP_HDRLEN < ospace) {
+ mo->m_data += state->hdrlen;
+ ospace -= state->hdrlen;
+ }
+
+ /*
+ * Fill in the first part of the PPP header. The protocol field
+ * comes from the decompressed data.
+ */
+ wptr = mtod(mo, u_char *);
+ wptr[0] = PPP_ADDRESS(hdr);
+ wptr[1] = PPP_CONTROL(hdr);
+ wptr[2] = 0;
+
+ /*
+ * Set up to call inflate. We set avail_out to 1 initially so we can
+ * look at the first byte of the output and decide whether we have
+ * a 1-byte or 2-byte protocol field.
+ */
+ state->strm.next_in = rptr;
+ state->strm.avail_in = rlen;
+ mi = mi->m_next;
+ flush = (mi == NULL)? Z_PACKET_FLUSH: Z_NO_FLUSH;
+ rlen += PPP_HDRLEN + DEFLATE_OVHD;
+ state->strm.next_out = wptr + 3;
+ state->strm.avail_out = 1;
+ decode_proto = 1;
+ olen = PPP_HDRLEN;
+
+ /*
+ * Call inflate, supplying more input or output as needed.
+ */
+ for (;;) {
+ r = inflate(&state->strm, flush);
+ if (r != Z_OK) {
+#if !DEFLATE_DEBUG
+ if (state->debug)
+#endif
+ printf("z_decompress%d: inflate returned %d (%s)\n",
+ state->unit, r, (state->strm.msg? state->strm.msg: ""));
+ m_freem(mo_head);
+ return DECOMP_FATALERROR;
+ }
+ if (flush != Z_NO_FLUSH && state->strm.avail_out != 0)
+ break; /* all done */
+ if (state->strm.avail_in == 0 && mi != NULL) {
+ state->strm.next_in = mtod(mi, u_char *);
+ state->strm.avail_in = mi->m_len;
+ rlen += mi->m_len;
+ mi = mi->m_next;
+ if (mi == NULL)
+ flush = Z_PACKET_FLUSH;
+ }
+ if (state->strm.avail_out == 0) {
+ if (decode_proto) {
+ state->strm.avail_out = ospace - PPP_HDRLEN;
+ if ((wptr[3] & 1) == 0) {
+ /* 2-byte protocol field */
+ wptr[2] = wptr[3];
+ --state->strm.next_out;
+ ++state->strm.avail_out;
+ --olen;
+ }
+ decode_proto = 0;
+ } else {
+ mo->m_len = ospace;
+ olen += ospace;
+ MGET(mo->m_next, M_DONTWAIT, MT_DATA);
+ mo = mo->m_next;
+ if (mo == NULL) {
+ m_freem(mo_head);
+ return DECOMP_ERROR;
+ }
+ MCLGET(mo, M_DONTWAIT);
+ state->strm.next_out = mtod(mo, u_char *);
+ state->strm.avail_out = ospace = M_TRAILINGSPACE(mo);
+ }
+ }
+ }
+ if (decode_proto) {
+ m_freem(mo_head);
+ return DECOMP_ERROR;
+ }
+ olen += (mo->m_len = ospace - state->strm.avail_out);
+#if DEFLATE_DEBUG
+ if (state->debug && olen > state->mru + PPP_HDRLEN)
+ printf("ppp_deflate%d: exceeded mru (%d > %d)\n",
+ state->unit, olen, state->mru + PPP_HDRLEN);
+#endif
+
+ state->stats.unc_bytes += olen;
+ state->stats.unc_packets++;
+ state->stats.comp_bytes += rlen;
+ state->stats.comp_packets++;
+
+ *mop = mo_head;
+ return DECOMP_OK;
+}
+
+/*
+ * Incompressible data has arrived - add it to the history.
+ */
+static void
+z_incomp(arg, mi)
+ void *arg;
+ struct mbuf *mi;
+{
+ struct deflate_state *state = (struct deflate_state *) arg;
+ u_char *rptr;
+ int rlen, proto, r;
+
+ /*
+ * Check that the protocol is one we handle.
+ */
+ rptr = mtod(mi, u_char *);
+ proto = PPP_PROTOCOL(rptr);
+ if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
+ return;
+
+ ++state->seqno;
+
+ /*
+ * Iterate through the mbufs, adding the characters in them
+ * to the decompressor's history. For the first mbuf, we start
+ * at either the 1st or 2nd byte of the protocol field,
+ * depending on whether the protocol value is compressible.
+ */
+ rlen = mi->m_len;
+ state->strm.next_in = rptr + 3;
+ state->strm.avail_in = rlen - 3;
+ if (proto > 0xff) {
+ --state->strm.next_in;
+ ++state->strm.avail_in;
+ }
+ for (;;) {
+ r = inflateIncomp(&state->strm);
+ if (r != Z_OK) {
+ /* gak! */
+#if !DEFLATE_DEBUG
+ if (state->debug)
+#endif
+ printf("z_incomp%d: inflateIncomp returned %d (%s)\n",
+ state->unit, r, (state->strm.msg? state->strm.msg: ""));
+ return;
+ }
+ mi = mi->m_next;
+ if (mi == NULL)
+ break;
+ state->strm.next_in = mtod(mi, u_char *);
+ state->strm.avail_in = mi->m_len;
+ rlen += mi->m_len;
+ }
+
+ /*
+ * Update stats.
+ */
+ state->stats.inc_bytes += rlen;
+ state->stats.inc_packets++;
+ state->stats.unc_bytes += rlen;
+ state->stats.unc_packets++;
+}
+
+#endif /* DO_DEFLATE */
diff --git a/sys/net/ppp_defs.h b/sys/net/ppp_defs.h
new file mode 100644
index 0000000..ac86be2
--- /dev/null
+++ b/sys/net/ppp_defs.h
@@ -0,0 +1,155 @@
+/*
+ * ppp_defs.h - PPP definitions.
+ *
+ * Copyright (c) 1994 The Australian National University.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies. This software is provided without any
+ * warranty, express or implied. The Australian National University
+ * makes no representations about the suitability of this software for
+ * any purpose.
+ *
+ * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
+ * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
+ * OR MODIFICATIONS.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PPP_DEFS_H_
+#define _PPP_DEFS_H_
+
+/*
+ * The basic PPP frame.
+ */
+#define PPP_HDRLEN 4 /* octets for standard ppp header */
+#define PPP_FCSLEN 2 /* octets for FCS */
+#define PPP_MRU 1500 /* default MRU = max length of info field */
+
+#define PPP_ADDRESS(p) (((u_char *)(p))[0])
+#define PPP_CONTROL(p) (((u_char *)(p))[1])
+#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3])
+
+/*
+ * Significant octet values.
+ */
+#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
+#define PPP_UI 0x03 /* Unnumbered Information */
+#define PPP_FLAG 0x7e /* Flag Sequence */
+#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */
+#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */
+
+/*
+ * Protocol field values.
+ */
+#define PPP_IP 0x21 /* Internet Protocol */
+#define PPP_XNS 0x25 /* Xerox NS */
+#define PPP_AT 0x29 /* AppleTalk Protocol */
+#define PPP_IPX 0x2b /* IPX Datagram (RFC1552) */
+#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */
+#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */
+#define PPP_COMP 0xfd /* compressed packet */
+#define PPP_IPCP 0x8021 /* IP Control Protocol */
+#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */
+#define PPP_IPXCP 0x802b /* IPX Control Protocol (RFC1552) */
+#define PPP_CCP 0x80fd /* Compression Control Protocol */
+#define PPP_LCP 0xc021 /* Link Control Protocol */
+#define PPP_PAP 0xc023 /* Password Authentication Protocol */
+#define PPP_LQR 0xc025 /* Link Quality Report protocol */
+#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */
+#define PPP_CBCP 0xc029 /* Callback Control Protocol */
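+
+/*
+ * Illustrative sketch (not part of the original header): dissecting the
+ * four-octet header of a received frame with the accessors and protocol
+ * values above.  The helper and its buffer "p" are hypothetical.
+ */
+#if 0
+static void
+example_ppp_dispatch(u_char *p)
+{
+	if (PPP_ADDRESS(p) != PPP_ALLSTATIONS || PPP_CONTROL(p) != PPP_UI)
+		return;
+	switch (PPP_PROTOCOL(p)) {
+	case PPP_IP:
+		/* payload at p + PPP_HDRLEN is an IP datagram */
+		break;
+	case PPP_LCP:
+		/* link control traffic */
+		break;
+	}
+}
+#endif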
+
+/*
+ * Values for FCS calculations.
+ */
+#define PPP_INITFCS 0xffff /* Initial FCS value */
+#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */
+#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff])
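+
+/*
+ * Illustrative sketch (not part of the original header): the FCS is
+ * accumulated one octet at a time with PPP_FCS(), starting from
+ * PPP_INITFCS.  "fcstab" is the usual 256-entry CRC-16/CCITT lookup
+ * table supplied elsewhere (e.g. ppp_tty.c); the helper is hypothetical.
+ */
+#if 0
+static int
+example_fcs_ok(u_char *frame, int len)
+{
+	u_short fcs = PPP_INITFCS;
+
+	while (--len >= 0)
+		fcs = PPP_FCS(fcs, *frame++);
+	/* a frame received with its two FCS octets intact checks out
+	   when the running value equals PPP_GOODFCS */
+	return (fcs == PPP_GOODFCS);
+}
+#endif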
+
+/*
+ * Extended asyncmap - allows any character to be escaped.
+ */
+typedef u_int32_t ext_accm[8];
+
+/*
+ * What to do with network protocol (NP) packets.
+ */
+enum NPmode {
+ NPMODE_PASS, /* pass the packet through */
+ NPMODE_DROP, /* silently drop the packet */
+ NPMODE_ERROR, /* return an error */
+ NPMODE_QUEUE /* save it up for later. */
+};
+
+/*
+ * Statistics.
+ */
+struct pppstat {
+ unsigned int ppp_ibytes; /* bytes received */
+ unsigned int ppp_ipackets; /* packets received */
+ unsigned int ppp_ierrors; /* receive errors */
+ unsigned int ppp_obytes; /* bytes sent */
+ unsigned int ppp_opackets; /* packets sent */
+ unsigned int ppp_oerrors; /* transmit errors */
+};
+
+struct vjstat {
+ unsigned int vjs_packets; /* outbound packets */
+ unsigned int vjs_compressed; /* outbound compressed packets */
+ unsigned int vjs_searches; /* searches for connection state */
+ unsigned int vjs_misses; /* times couldn't find conn. state */
+ unsigned int vjs_uncompressedin; /* inbound uncompressed packets */
+ unsigned int vjs_compressedin; /* inbound compressed packets */
+ unsigned int vjs_errorin; /* inbound unknown type packets */
+ unsigned int vjs_tossed; /* inbound packets tossed because of error */
+};
+
+struct ppp_stats {
+ struct pppstat p; /* basic PPP statistics */
+ struct vjstat vj; /* VJ header compression statistics */
+};
+
+struct compstat {
+ unsigned int unc_bytes; /* total uncompressed bytes */
+ unsigned int unc_packets; /* total uncompressed packets */
+ unsigned int comp_bytes; /* compressed bytes */
+ unsigned int comp_packets; /* compressed packets */
+ unsigned int inc_bytes; /* incompressible bytes */
+ unsigned int inc_packets; /* incompressible packets */
+ unsigned int ratio; /* recent compression ratio << 8 */
+};
+
+struct ppp_comp_stats {
+ struct compstat c; /* packet compression statistics */
+ struct compstat d; /* packet decompression statistics */
+};
+
+/*
+ * The following structure records the time in seconds since
+ * the last NP packet was sent or received.
+ */
+struct ppp_idle {
+ time_t xmit_idle; /* time since last NP packet sent */
+ time_t recv_idle; /* time since last NP packet received */
+};
+
+#ifndef __P
+#ifdef __STDC__
+#define __P(x) x
+#else
+#define __P(x) ()
+#endif
+#endif
+
+#endif /* _PPP_DEFS_H_ */
diff --git a/sys/net/ppp_tty.c b/sys/net/ppp_tty.c
new file mode 100644
index 0000000..cb181c5
--- /dev/null
+++ b/sys/net/ppp_tty.c
@@ -0,0 +1,1128 @@
+/*
+ * ppp_tty.c - Point-to-Point Protocol (PPP) driver for asynchronous
+ * tty devices.
+ *
+ * Copyright (c) 1989 Carnegie Mellon University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by Carnegie Mellon University. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Drew D. Perkins
+ * Carnegie Mellon University
+ * 4910 Forbes Ave.
+ * Pittsburgh, PA 15213
+ * (412) 268-8576
+ * ddp@andrew.cmu.edu
+ *
+ * Based on:
+ * @(#)if_sl.c 7.6.1.2 (Berkeley) 2/15/89
+ *
+ * Copyright (c) 1987 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Serial Line interface
+ *
+ * Rick Adams
+ * Center for Seismic Studies
+ * 1300 N 17th Street, Suite 1450
+ * Arlington, Virginia 22209
+ * (703)276-7900
+ * rick@seismo.ARPA
+ * seismo!rick
+ *
+ * Pounded on heavily by Chris Torek (chris@mimsy.umd.edu, umcp-cs!chris).
+ * Converted to 4.3BSD Beta by Chris Torek.
+ * Other changes made at Berkeley, based in part on code by Kirk Smith.
+ *
+ * Converted to 4.3BSD+ 386BSD by Brad Parker (brad@cayman.com)
+ * Added VJ tcp header compression; more unified ioctls
+ *
+ * Extensively modified by Paul Mackerras (paulus@cs.anu.edu.au).
+ * Cleaned up a lot of the mbuf-related code to fix bugs that
+ * caused system crashes and packet corruption. Changed pppstart
+ * so that it doesn't just give up with a "collision" if the whole
+ * packet doesn't fit in the output ring buffer.
+ *
+ * Added priority queueing for interactive IP packets, following
+ * the model of if_sl.c, plus hooks for bpf.
+ * Paul Mackerras (paulus@cs.anu.edu.au).
+ */
+
+/* $FreeBSD$ */
+
+#include "ppp.h"
+#if NPPP > 0
+
+#include "opt_ppp.h" /* XXX for ppp_defs.h */
+
+#define VJC /* XXX for ppp_defs.h */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/dkstat.h>
+#include <sys/socket.h>
+#include <sys/fcntl.h>
+#include <sys/tty.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/vnode.h>
+
+#ifdef __i386__
+#include <i386/isa/intr_machdep.h>
+#endif
+
+#ifdef PPP_FILTER
+#include <net/bpf.h>
+#endif
+#include <net/if_ppp.h>
+#include <net/if_pppvar.h>
+
+static int pppopen __P((dev_t dev, struct tty *tp));
+static int pppclose __P((struct tty *tp, int flag));
+static int pppread __P((struct tty *tp, struct uio *uio, int flag));
+static int pppwrite __P((struct tty *tp, struct uio *uio, int flag));
+static int ppptioctl __P((struct tty *tp, u_long cmd, caddr_t data, int flag,
+ struct proc *));
+static int pppinput __P((int c, struct tty *tp));
+static int pppstart __P((struct tty *tp));
+
+static u_short pppfcs __P((u_short fcs, u_char *cp, int len));
+static void pppasyncstart __P((struct ppp_softc *));
+static void pppasyncctlp __P((struct ppp_softc *));
+static void pppasyncrelinq __P((struct ppp_softc *));
+static void pppasyncsetmtu __P((struct ppp_softc *));
+static void ppp_timeout __P((void *));
+static void pppgetm __P((struct ppp_softc *sc));
+static void ppplogchar __P((struct ppp_softc *, int));
+
+/* XXX called from if_ppp.c - layering violation */
+void pppasyncattach __P((void *));
+
+/*
+ * Some useful mbuf macros not in mbuf.h.
+ */
+#define M_IS_CLUSTER(m) ((m)->m_flags & M_EXT)
+
+#define M_DATASTART(m) \
+ (M_IS_CLUSTER(m) ? (m)->m_ext.ext_buf : \
+ (m)->m_flags & M_PKTHDR ? (m)->m_pktdat : (m)->m_dat)
+
+#define M_DATASIZE(m) \
+ (M_IS_CLUSTER(m) ? (m)->m_ext.ext_size : \
+ (m)->m_flags & M_PKTHDR ? MHLEN: MLEN)
+
+/*
+ * Does c need to be escaped?
+ */
+#define ESCAPE_P(c) (sc->sc_asyncmap[(c) >> 5] & (1 << ((c) & 0x1F)))
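
ESCAPE_P() treats sc_asyncmap as a 256-bit set indexed by character value: word c >> 5,
bit c & 0x1f. Below is a minimal userland sketch of the same test (the variable and
function names are illustrative, not part of the driver), using the default transmit map
that pppopen() installs: 0x00-0x1f always escaped, plus 0x7d and 0x7e.

    #include <stdio.h>

    /* Same layout as sc_asyncmap[]: 256 bits, word c >> 5, bit c & 0x1f. */
    static unsigned int asyncmap[8] = { 0xffffffff, 0, 0, 0x60000000, 0, 0, 0, 0 };

    static int
    escape_p(unsigned char c)
    {
        return (asyncmap[c >> 5] & (1u << (c & 0x1f))) != 0;
    }

    int
    main(void)
    {
        printf("0x03 -> %d\n", escape_p(0x03));  /* 1: control characters are escaped */
        printf("0x41 -> %d\n", escape_p(0x41));  /* 0: 'A' passes through unescaped */
        printf("0x7e -> %d\n", escape_p(0x7e));  /* 1: PPP_FLAG must always be escaped */
        return 0;
    }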
+
+/*
+ * Procedures for using an async tty interface for PPP.
+ */
+
+/* This is a FreeBSD-2.X kernel. */
+#define CCOUNT(q) ((q)->c_cc)
+#define PPP_LOWAT 100 /* Process more output when < LOWAT on queue */
+#define PPP_HIWAT 400 /* Don't start a new packet if HIWAT on queue */
+
+/*
+ * Define the PPP line discipline.
+ */
+
+static struct linesw pppdisc = {
+ pppopen, pppclose, pppread, pppwrite,
+ ppptioctl, pppinput, pppstart, ttymodem,
+ PPP_FLAG
+};
+
+void
+pppasyncattach(dummy)
+ void *dummy;
+{
+#ifdef __i386__
+ int s;
+
+ s = splhigh();
+
+ /*
+ * Make sure that the soft net "engine" cannot run while spltty code is
+ * active. The if_ppp.c code can walk down into b_to_q etc., and it is
+ * bad if the tty system was in the middle of another b_to_q...
+ */
+ tty_imask |= softnet_imask; /* spltty() block spl[soft]net() */
+ net_imask |= softtty_imask; /* splimp() block splsofttty() */
+ net_imask |= tty_imask; /* splimp() block spltty() */
+ update_intr_masks();
+
+ splx(s);
+ if ( bootverbose )
+ printf("new masks: bio %x, tty %x, net %x\n",
+ bio_imask, tty_imask, net_imask);
+#endif
+
+ /* register line discipline */
+ linesw[PPPDISC] = pppdisc;
+}
+
+/*
+ * Line specific open routine for async tty devices.
+ * Attach the given tty to the first available ppp unit.
+ * Called from device open routine or ttioctl() at >= splsofttty().
+ */
+/* ARGSUSED */
+static int
+pppopen(dev, tp)
+ dev_t dev;
+ register struct tty *tp;
+{
+ struct proc *p = curproc; /* XXX */
+ register struct ppp_softc *sc;
+ int error, s;
+
+ if ((error = suser(p)) != 0)
+ return (error);
+
+ s = spltty();
+
+ if (tp->t_line == PPPDISC) {
+ sc = (struct ppp_softc *) tp->t_sc;
+ if (sc != NULL && sc->sc_devp == (void *) tp) {
+ splx(s);
+ return (0);
+ }
+ }
+
+ if ((sc = pppalloc(p->p_pid)) == NULL) {
+ splx(s);
+ return ENXIO;
+ }
+
+ if (sc->sc_relinq)
+ (*sc->sc_relinq)(sc); /* get previous owner to relinquish the unit */
+
+ sc->sc_ilen = 0;
+ sc->sc_m = NULL;
+ bzero(sc->sc_asyncmap, sizeof(sc->sc_asyncmap));
+ sc->sc_asyncmap[0] = 0xffffffff;
+ sc->sc_asyncmap[3] = 0x60000000;
+ sc->sc_rasyncmap = 0;
+ sc->sc_devp = (void *) tp;
+ sc->sc_start = pppasyncstart;
+ sc->sc_ctlp = pppasyncctlp;
+ sc->sc_relinq = pppasyncrelinq;
+ sc->sc_setmtu = pppasyncsetmtu;
+ sc->sc_outm = NULL;
+ pppgetm(sc);
+ sc->sc_if.if_flags |= IFF_RUNNING;
+ getmicrotime(&sc->sc_if.if_lastchange);
+ sc->sc_if.if_baudrate = tp->t_ospeed;
+
+ tp->t_sc = (caddr_t) sc;
+ ttyflush(tp, FREAD | FWRITE);
+
+ /*
+ * Pre-allocate cblocks to the "just right" amount. The 1 byte t_canq
+ * allocation helps avoid the need for select and/or FIONREAD.
+ * We also pass 1 byte tokens through t_canq...
+ */
+ clist_alloc_cblocks(&tp->t_canq, 1, 1);
+ clist_alloc_cblocks(&tp->t_outq, sc->sc_if.if_mtu + PPP_HIWAT,
+ sc->sc_if.if_mtu + PPP_HIWAT);
+ clist_alloc_cblocks(&tp->t_rawq, 0, 0);
+
+ splx(s);
+
+ return (0);
+}
+
+/*
+ * Line specific close routine, called from device close routine
+ * and from ttioctl at >= splsofttty().
+ * Detach the tty from the ppp unit.
+ * Mimics part of ttyclose().
+ */
+static int
+pppclose(tp, flag)
+ struct tty *tp;
+ int flag;
+{
+ register struct ppp_softc *sc;
+ int s;
+
+ s = spltty();
+ ttyflush(tp, FREAD | FWRITE);
+ clist_free_cblocks(&tp->t_canq);
+ clist_free_cblocks(&tp->t_outq);
+ tp->t_line = 0;
+ sc = (struct ppp_softc *) tp->t_sc;
+ if (sc != NULL) {
+ tp->t_sc = NULL;
+ if (tp == (struct tty *) sc->sc_devp) {
+ pppasyncrelinq(sc);
+ pppdealloc(sc);
+ }
+ }
+ splx(s);
+ return 0;
+}
+
+/*
+ * Relinquish the interface unit to another device.
+ */
+static void
+pppasyncrelinq(sc)
+ struct ppp_softc *sc;
+{
+ int s;
+
+ s = spltty();
+ if (sc->sc_outm) {
+ m_freem(sc->sc_outm);
+ sc->sc_outm = NULL;
+ }
+ if (sc->sc_m) {
+ m_freem(sc->sc_m);
+ sc->sc_m = NULL;
+ }
+ if (sc->sc_flags & SC_TIMEOUT) {
+ untimeout(ppp_timeout, (void *) sc, sc->sc_ch);
+ sc->sc_flags &= ~SC_TIMEOUT;
+ }
+ splx(s);
+}
+
+/*
+ * This gets called from the upper layer to notify us of an MTU change.
+ */
+static void
+pppasyncsetmtu(sc)
+register struct ppp_softc *sc;
+{
+ register struct tty *tp = (struct tty *) sc->sc_devp;
+ int s;
+
+ s = spltty();
+ if (tp != NULL)
+ clist_alloc_cblocks(&tp->t_outq, sc->sc_if.if_mtu + PPP_HIWAT,
+ sc->sc_if.if_mtu + PPP_HIWAT);
+ splx(s);
+}
+
+/*
+ * Line specific (tty) read routine.
+ * Called at zero spl from the device driver in response to user-level
+ * reads on the tty file descriptor (i.e. pppd).
+ */
+static int
+pppread(tp, uio, flag)
+ register struct tty *tp;
+ struct uio *uio;
+ int flag;
+{
+ register struct ppp_softc *sc = (struct ppp_softc *)tp->t_sc;
+ struct mbuf *m, *m0;
+ register int s;
+ int error = 0;
+
+ if (sc == NULL)
+ return 0;
+ /*
+ * Loop waiting for input, checking that nothing disastrous
+ * happens in the meantime.
+ */
+ s = spltty();
+ for (;;) {
+ if (tp != (struct tty *) sc->sc_devp || tp->t_line != PPPDISC) {
+ splx(s);
+ return 0;
+ }
+ if (sc->sc_inq.ifq_head != NULL)
+ break;
+ if ((tp->t_state & TS_CONNECTED) == 0) {
+ splx(s);
+ return 0; /* end of file */
+ }
+ if (tp->t_state & TS_ASYNC || flag & IO_NDELAY) {
+ splx(s);
+ return (EWOULDBLOCK);
+ }
+ error = ttysleep(tp, TSA_HUP_OR_INPUT(tp), TTIPRI | PCATCH, "pppin", 0);
+ if (error) {
+ splx(s);
+ return error;
+ }
+ }
+
+ /* Pull place-holder byte out of canonical queue */
+ getc(&tp->t_canq);
+
+ /* Get the packet from the input queue */
+ IF_DEQUEUE(&sc->sc_inq, m0);
+ splx(s);
+
+ for (m = m0; m && uio->uio_resid; m = m->m_next)
+ if ((error = uiomove(mtod(m, u_char *), m->m_len, uio)) != 0)
+ break;
+ m_freem(m0);
+ return (error);
+}
+
+/*
+ * Line specific (tty) write routine.
+ * Called at zero spl from the device driver in response to user-level
+ * writes on the tty file descriptor (i.e. pppd).
+ */
+static int
+pppwrite(tp, uio, flag)
+ register struct tty *tp;
+ struct uio *uio;
+ int flag;
+{
+ register struct ppp_softc *sc = (struct ppp_softc *)tp->t_sc;
+ struct mbuf *m, *m0, **mp;
+ struct sockaddr dst;
+ int len, error, s;
+
+ if ((tp->t_state & TS_CONNECTED) == 0)
+ return 0; /* wrote 0 bytes */
+ if (tp->t_line != PPPDISC)
+ return (EINVAL);
+ if (sc == NULL || tp != (struct tty *) sc->sc_devp)
+ return EIO;
+ if (uio->uio_resid > sc->sc_if.if_mtu + PPP_HDRLEN ||
+ uio->uio_resid < PPP_HDRLEN)
+ return (EMSGSIZE);
+
+ s = spltty();
+ for (mp = &m0; uio->uio_resid; mp = &m->m_next) {
+ MGET(m, M_WAIT, MT_DATA);
+ if ((*mp = m) == NULL) {
+ m_freem(m0);
+ splx(s);
+ return (ENOBUFS);
+ }
+ m->m_len = 0;
+ if (uio->uio_resid >= MCLBYTES / 2)
+ MCLGET(m, M_DONTWAIT);
+ len = M_TRAILINGSPACE(m);
+ if (len > uio->uio_resid)
+ len = uio->uio_resid;
+ if ((error = uiomove(mtod(m, u_char *), len, uio)) != 0) {
+ m_freem(m0);
+ splx(s);
+ return (error);
+ }
+ m->m_len = len;
+ }
+ dst.sa_family = AF_UNSPEC;
+ bcopy(mtod(m0, u_char *), dst.sa_data, PPP_HDRLEN);
+ m0->m_data += PPP_HDRLEN;
+ m0->m_len -= PPP_HDRLEN;
+
+ /* call the upper layer to "transmit" it... */
+ error = pppoutput(&sc->sc_if, m0, &dst, (struct rtentry *)0);
+ splx(s);
+ return (error);
+}
+
+/*
+ * Line specific (tty) ioctl routine.
+ * This discipline requires that tty device drivers call
+ * the line specific l_ioctl routine from their ioctl routines.
+ */
+/* ARGSUSED */
+static int
+ppptioctl(tp, cmd, data, flag, p)
+ struct tty *tp;
+ u_long cmd;
+ caddr_t data;
+ int flag;
+ struct proc *p;
+{
+ struct ppp_softc *sc = (struct ppp_softc *) tp->t_sc;
+ int error, s;
+
+ if (sc == NULL || tp != (struct tty *) sc->sc_devp)
+ return (ENOIOCTL);
+
+ error = 0;
+ switch (cmd) {
+ case PPPIOCSASYNCMAP:
+ if ((error = suser(p)) != 0)
+ break;
+ sc->sc_asyncmap[0] = *(u_int *)data;
+ break;
+
+ case PPPIOCGASYNCMAP:
+ *(u_int *)data = sc->sc_asyncmap[0];
+ break;
+
+ case PPPIOCSRASYNCMAP:
+ if ((error = suser(p)) != 0)
+ break;
+ sc->sc_rasyncmap = *(u_int *)data;
+ break;
+
+ case PPPIOCGRASYNCMAP:
+ *(u_int *)data = sc->sc_rasyncmap;
+ break;
+
+ case PPPIOCSXASYNCMAP:
+ if ((error = suser(p)) != 0)
+ break;
+ s = spltty();
+ bcopy(data, sc->sc_asyncmap, sizeof(sc->sc_asyncmap));
+ sc->sc_asyncmap[1] = 0; /* mustn't escape 0x20 - 0x3f */
+ sc->sc_asyncmap[2] &= ~0x40000000; /* mustn't escape 0x5e */
+ sc->sc_asyncmap[3] |= 0x60000000; /* must escape 0x7d, 0x7e */
+ splx(s);
+ break;
+
+ case PPPIOCGXASYNCMAP:
+ bcopy(sc->sc_asyncmap, data, sizeof(sc->sc_asyncmap));
+ break;
+
+ default:
+ error = pppioctl(sc, cmd, data, flag, p);
+ if (error == 0 && cmd == PPPIOCSMRU)
+ pppgetm(sc);
+ }
+
+ return error;
+}
+
+/*
+ * FCS lookup table as calculated by genfcstab.
+ */
+static u_short fcstab[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
+ 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
+ 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
+ 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
+ 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
+ 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
+ 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
+ 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
+ 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
+ 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
+ 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
+ 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
+ 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
+ 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
+ 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
+ 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
+ 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
+ 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
+ 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
+ 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
+ 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
+ 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
+ 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
+ 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
+ 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
+ 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
+ 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
+ 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
+ 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
+ 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
+ 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
+ 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
+};
+
+/*
+ * Calculate a new FCS given the current FCS and the new data.
+ */
+static u_short
+pppfcs(u_short fcs, u_char *cp, int len)
+{
+ while (len--)
+ fcs = PPP_FCS(fcs, *cp++);
+ return (fcs);
+}
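
fcstab[] is the byte-at-a-time table for the PPP FCS-16, the HDLC CRC with the
bit-reflected polynomial 0x8408; PPP_FCS() (defined elsewhere, in ppp_defs.h) folds one
byte through it per step. The genfcstab generator itself is not part of this file, but a
table with these values can be reproduced with the standard construction; the following
is a hedged sketch, not the tool's actual source.

    #include <stdio.h>

    static unsigned short table[256];

    static void
    gen_fcstab(void)
    {
        unsigned int b, i, v;

        for (b = 0; b < 256; b++) {
            v = b;
            for (i = 0; i < 8; i++)
                v = (v & 1) ? (v >> 1) ^ 0x8408 : v >> 1;
            table[b] = (unsigned short)v;
        }
    }

    int
    main(void)
    {
        gen_fcstab();
        /* Expect "1189 f8f7", matching the first row of fcstab[] above. */
        printf("%04x %04x\n", table[1], table[15]);
        return 0;
    }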
+
+/*
+ * This gets called at splsoftnet from if_ppp.c at various times
+ * when there is data ready to be sent.
+ */
+static void
+pppasyncstart(sc)
+ register struct ppp_softc *sc;
+{
+ register struct tty *tp = (struct tty *) sc->sc_devp;
+ register struct mbuf *m;
+ register int len;
+ register u_char *start, *stop, *cp;
+ int n, ndone, done, idle;
+ struct mbuf *m2;
+ int s;
+
+ idle = 0;
+ /* XXX assumes atomic access to *tp although we're not at spltty(). */
+ while (CCOUNT(&tp->t_outq) < PPP_HIWAT) {
+ /*
+ * See if we have an existing packet partly sent.
+ * If not, get a new packet and start sending it.
+ */
+ m = sc->sc_outm;
+ if (m == NULL) {
+ /*
+ * Get another packet to be sent.
+ */
+ m = ppp_dequeue(sc);
+ if (m == NULL) {
+ idle = 1;
+ break;
+ }
+
+ /*
+ * The extra PPP_FLAG will start up a new packet, and thus
+ * will flush any accumulated garbage. We do this whenever
+ * the line may have been idle for some time.
+ */
+ /* XXX as above. */
+ if (CCOUNT(&tp->t_outq) == 0) {
+ ++sc->sc_stats.ppp_obytes;
+ (void) putc(PPP_FLAG, &tp->t_outq);
+ }
+
+ /* Calculate the FCS for the first mbuf's worth. */
+ sc->sc_outfcs = pppfcs(PPP_INITFCS, mtod(m, u_char *), m->m_len);
+ getmicrotime(&sc->sc_if.if_lastchange);
+ }
+
+ for (;;) {
+ start = mtod(m, u_char *);
+ len = m->m_len;
+ stop = start + len;
+ while (len > 0) {
+ /*
+ * Find out how many bytes in the string we can
+ * handle without doing something special.
+ */
+ for (cp = start; cp < stop; cp++)
+ if (ESCAPE_P(*cp))
+ break;
+ n = cp - start;
+ if (n) {
+ /* NetBSD (0.9 or later), 4.3-Reno or similar. */
+ ndone = n - b_to_q(start, n, &tp->t_outq);
+ len -= ndone;
+ start += ndone;
+ sc->sc_stats.ppp_obytes += ndone;
+
+ if (ndone < n)
+ break; /* packet doesn't fit */
+ }
+ /*
+ * If there are characters left in the mbuf,
+ * the first one must be special.
+ * Put it out in a different form.
+ */
+ if (len) {
+ s = spltty();
+ if (putc(PPP_ESCAPE, &tp->t_outq)) {
+ splx(s);
+ break;
+ }
+ if (putc(*start ^ PPP_TRANS, &tp->t_outq)) {
+ (void) unputc(&tp->t_outq);
+ splx(s);
+ break;
+ }
+ splx(s);
+ sc->sc_stats.ppp_obytes += 2;
+ start++;
+ len--;
+ }
+ }
+
+ /*
+ * If we didn't empty this mbuf, remember where we're up to.
+ * If we emptied the last mbuf, try to add the FCS and closing
+ * flag, and if we can't, leave sc_outm pointing to m, but with
+ * m->m_len == 0, to remind us to output the FCS and flag later.
+ */
+ done = len == 0;
+ if (done && m->m_next == NULL) {
+ u_char *p, *q;
+ int c;
+ u_char endseq[8];
+
+ /*
+ * We may have to escape the bytes in the FCS.
+ */
+ p = endseq;
+ c = ~sc->sc_outfcs & 0xFF;
+ if (ESCAPE_P(c)) {
+ *p++ = PPP_ESCAPE;
+ *p++ = c ^ PPP_TRANS;
+ } else
+ *p++ = c;
+ c = (~sc->sc_outfcs >> 8) & 0xFF;
+ if (ESCAPE_P(c)) {
+ *p++ = PPP_ESCAPE;
+ *p++ = c ^ PPP_TRANS;
+ } else
+ *p++ = c;
+ *p++ = PPP_FLAG;
+
+ /*
+ * Try to output the FCS and flag. If the bytes
+ * don't all fit, back out.
+ */
+ s = spltty();
+ for (q = endseq; q < p; ++q)
+ if (putc(*q, &tp->t_outq)) {
+ done = 0;
+ for (; q > endseq; --q)
+ unputc(&tp->t_outq);
+ break;
+ }
+ splx(s);
+ if (done)
+ sc->sc_stats.ppp_obytes += q - endseq;
+ }
+
+ if (!done) {
+ /* remember where we got to */
+ m->m_data = start;
+ m->m_len = len;
+ break;
+ }
+
+ /* Finished with this mbuf; free it and move on. */
+ MFREE(m, m2);
+ m = m2;
+ if (m == NULL) {
+ /* Finished a packet */
+ break;
+ }
+ sc->sc_outfcs = pppfcs(sc->sc_outfcs, mtod(m, u_char *), m->m_len);
+ }
+
+ /*
+ * If m == NULL, we have finished a packet.
+ * If m != NULL, we've either done as much work this time
+ * as we need to, or else we've filled up the output queue.
+ */
+ sc->sc_outm = m;
+ if (m)
+ break;
+ }
+
+ /* Call pppstart to start output again if necessary. */
+ s = spltty();
+ pppstart(tp);
+
+ /*
+ * This timeout is needed for operation on a pseudo-tty,
+ * because the pty code doesn't call pppstart after it has
+ * drained the t_outq.
+ */
+ if (!idle && (sc->sc_flags & SC_TIMEOUT) == 0) {
+ sc->sc_ch = timeout(ppp_timeout, (void *) sc, 1);
+ sc->sc_flags |= SC_TIMEOUT;
+ }
+
+ splx(s);
+}
+
+/*
+ * This gets called when a received packet is placed on
+ * the inq, at splsoftnet. It wakes up pppd so that it will do a read().
+ */
+static void
+pppasyncctlp(sc)
+ struct ppp_softc *sc;
+{
+ struct tty *tp;
+ int s;
+
+ /* Put a placeholder byte in canq for ttselect()/ttnread(). */
+ s = spltty();
+ tp = (struct tty *) sc->sc_devp;
+ putc(0, &tp->t_canq);
+ ttwakeup(tp);
+ splx(s);
+}
+
+/*
+ * Start output on async tty interface. If the transmit queue
+ * has drained sufficiently, arrange for pppasyncstart to be
+ * called later at splsoftnet.
+ * Called at spltty or higher.
+ */
+int
+pppstart(tp)
+ register struct tty *tp;
+{
+ register struct ppp_softc *sc = (struct ppp_softc *) tp->t_sc;
+
+ /*
+ * Call output process whether or not there is any output.
+ * We are being called in lieu of ttstart and must do what it would.
+ */
+ if (tp->t_oproc != NULL)
+ (*tp->t_oproc)(tp);
+
+ /*
+ * If the transmit queue has drained and the tty has not hung up
+ * or been disconnected from the ppp unit, then tell if_ppp.c that
+ * we need more output.
+ */
+ if (CCOUNT(&tp->t_outq) < PPP_LOWAT
+ && !((tp->t_state & TS_CONNECTED) == 0)
+ && sc != NULL && tp == (struct tty *) sc->sc_devp) {
+ ppp_restart(sc);
+ }
+
+ return 0;
+}
+
+/*
+ * Timeout routine - try to start some more output.
+ */
+static void
+ppp_timeout(x)
+ void *x;
+{
+ struct ppp_softc *sc = (struct ppp_softc *) x;
+ struct tty *tp = (struct tty *) sc->sc_devp;
+ int s;
+
+ s = spltty();
+ sc->sc_flags &= ~SC_TIMEOUT;
+ pppstart(tp);
+ splx(s);
+}
+
+/*
+ * Allocate enough mbufs to handle the current MRU.
+ */
+static void
+pppgetm(sc)
+ register struct ppp_softc *sc;
+{
+ struct mbuf *m, **mp;
+ int len;
+
+ mp = &sc->sc_m;
+ for (len = sc->sc_mru + PPP_HDRLEN + PPP_FCSLEN; len > 0; ){
+ if ((m = *mp) == NULL) {
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ break;
+ *mp = m;
+ MCLGET(m, M_DONTWAIT);
+ }
+ len -= M_DATASIZE(m);
+ mp = &m->m_next;
+ }
+}
+
+/*
+ * tty interface receiver interrupt.
+ */
+static unsigned paritytab[8] = {
+ 0x96696996, 0x69969669, 0x69969669, 0x96696996,
+ 0x69969669, 0x96696996, 0x96696996, 0x69969669
+};
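
paritytab[] packs one bit per byte value (same word/bit indexing as the asyncmap), set
when that value contains an odd number of 1 bits; pppinput() uses it to record
SC_RCV_ODDP or SC_RCV_EVNP for each received character so that user level can detect
lines that are not 8-bit clean. A quick userland check of that reading (illustrative
only):

    #include <stdio.h>

    static unsigned paritytab[8] = {
        0x96696996, 0x69969669, 0x69969669, 0x96696996,
        0x69969669, 0x96696996, 0x96696996, 0x69969669
    };

    int
    main(void)
    {
        unsigned c, v, ones, table_odd, mismatches = 0;

        for (c = 0; c < 256; c++) {
            for (ones = 0, v = c; v != 0; v >>= 1)
                ones += v & 1;
            table_odd = (paritytab[c >> 5] >> (c & 0x1f)) & 1;
            if (table_odd != (ones & 1))
                mismatches++;
        }
        printf("%u mismatches\n", mismatches);  /* expect 0 */
        return 0;
    }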
+
+/*
+ * Called when a character is available from the device driver.
+ * Only guaranteed to be at splsofttty() or spltty().
+ * This is safe to call while the upper half's netisr is preempted.
+ */
+static int
+pppinput(c, tp)
+ int c;
+ register struct tty *tp;
+{
+ register struct ppp_softc *sc;
+ struct mbuf *m;
+ int ilen, s;
+
+ sc = (struct ppp_softc *) tp->t_sc;
+ if (sc == NULL || tp != (struct tty *) sc->sc_devp)
+ return 0;
+
+ ++tk_nin;
+ ++sc->sc_stats.ppp_ibytes;
+
+ if ((tp->t_state & TS_CONNECTED) == 0) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: no carrier\n", sc->sc_if.if_unit);
+ goto flush;
+ }
+
+ if (c & TTY_ERRORMASK) {
+ /* framing error or overrun on this char - abort packet */
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: line error %x\n", sc->sc_if.if_unit,
+ c & TTY_ERRORMASK);
+ goto flush;
+ }
+
+ c &= TTY_CHARMASK;
+
+ /*
+ * Handle software flow control of output.
+ */
+ if (tp->t_iflag & IXON) {
+ if (c == tp->t_cc[VSTOP] && tp->t_cc[VSTOP] != _POSIX_VDISABLE) {
+ if ((tp->t_state & TS_TTSTOP) == 0) {
+ tp->t_state |= TS_TTSTOP;
+ tp->t_stop(tp, 0);
+ }
+ return 0;
+ }
+ if (c == tp->t_cc[VSTART] && tp->t_cc[VSTART] != _POSIX_VDISABLE) {
+ tp->t_state &= ~TS_TTSTOP;
+ if (tp->t_oproc != NULL)
+ (*tp->t_oproc)(tp);
+ return 0;
+ }
+ }
+
+ s = spltty();
+ if (c & 0x80)
+ sc->sc_flags |= SC_RCV_B7_1;
+ else
+ sc->sc_flags |= SC_RCV_B7_0;
+ if (paritytab[c >> 5] & (1 << (c & 0x1F)))
+ sc->sc_flags |= SC_RCV_ODDP;
+ else
+ sc->sc_flags |= SC_RCV_EVNP;
+ splx(s);
+
+ if (sc->sc_flags & SC_LOG_RAWIN)
+ ppplogchar(sc, c);
+
+ if (c == PPP_FLAG) {
+ ilen = sc->sc_ilen;
+ sc->sc_ilen = 0;
+
+ if (sc->sc_rawin_count > 0)
+ ppplogchar(sc, -1);
+
+ /*
+ * If SC_ESCAPED is set, then we've seen the packet
+ * abort sequence "}~".
+ */
+ if (sc->sc_flags & (SC_FLUSH | SC_ESCAPED)
+ || (ilen > 0 && sc->sc_fcs != PPP_GOODFCS)) {
+ s = spltty();
+ sc->sc_flags |= SC_PKTLOST; /* note the dropped packet */
+ if ((sc->sc_flags & (SC_FLUSH | SC_ESCAPED)) == 0){
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: bad fcs %x, pkt len %d\n",
+ sc->sc_if.if_unit, sc->sc_fcs, ilen);
+ sc->sc_if.if_ierrors++;
+ sc->sc_stats.ppp_ierrors++;
+ } else
+ sc->sc_flags &= ~(SC_FLUSH | SC_ESCAPED);
+ splx(s);
+ return 0;
+ }
+
+ if (ilen < PPP_HDRLEN + PPP_FCSLEN) {
+ if (ilen) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: too short (%d)\n", sc->sc_if.if_unit, ilen);
+ s = spltty();
+ sc->sc_if.if_ierrors++;
+ sc->sc_stats.ppp_ierrors++;
+ sc->sc_flags |= SC_PKTLOST;
+ splx(s);
+ }
+ return 0;
+ }
+
+ /*
+ * Remove FCS trailer. Somewhat painful...
+ */
+ ilen -= 2;
+ if (--sc->sc_mc->m_len == 0) {
+ for (m = sc->sc_m; m->m_next != sc->sc_mc; m = m->m_next)
+ ;
+ sc->sc_mc = m;
+ }
+ sc->sc_mc->m_len--;
+
+ /* excise this mbuf chain */
+ m = sc->sc_m;
+ sc->sc_m = sc->sc_mc->m_next;
+ sc->sc_mc->m_next = NULL;
+
+ ppppktin(sc, m, sc->sc_flags & SC_PKTLOST);
+ if (sc->sc_flags & SC_PKTLOST) {
+ s = spltty();
+ sc->sc_flags &= ~SC_PKTLOST;
+ splx(s);
+ }
+
+ pppgetm(sc);
+ return 0;
+ }
+
+ if (sc->sc_flags & SC_FLUSH) {
+ if (sc->sc_flags & SC_LOG_FLUSH)
+ ppplogchar(sc, c);
+ return 0;
+ }
+
+ if (c < 0x20 && (sc->sc_rasyncmap & (1 << c)))
+ return 0;
+
+ s = spltty();
+ if (sc->sc_flags & SC_ESCAPED) {
+ sc->sc_flags &= ~SC_ESCAPED;
+ c ^= PPP_TRANS;
+ } else if (c == PPP_ESCAPE) {
+ sc->sc_flags |= SC_ESCAPED;
+ splx(s);
+ return 0;
+ }
+ splx(s);
+
+ /*
+ * Initialize buffer on first octet received.
+ * First octet could be address or protocol (when compressing
+ * address/control).
+ * Second octet is control.
+ * Third octet is first or second (when compressing protocol)
+ * octet of protocol.
+ * Fourth octet is second octet of protocol.
+ */
+ if (sc->sc_ilen == 0) {
+ /* reset the first input mbuf */
+ if (sc->sc_m == NULL) {
+ pppgetm(sc);
+ if (sc->sc_m == NULL) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: no input mbufs!\n", sc->sc_if.if_unit);
+ goto flush;
+ }
+ }
+ m = sc->sc_m;
+ m->m_len = 0;
+ m->m_data = M_DATASTART(sc->sc_m);
+ sc->sc_mc = m;
+ sc->sc_mp = mtod(m, char *);
+ sc->sc_fcs = PPP_INITFCS;
+ if (c != PPP_ALLSTATIONS) {
+ if (sc->sc_flags & SC_REJ_COMP_AC) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: garbage received: 0x%x (need 0xFF)\n",
+ sc->sc_if.if_unit, c);
+ goto flush;
+ }
+ *sc->sc_mp++ = PPP_ALLSTATIONS;
+ *sc->sc_mp++ = PPP_UI;
+ sc->sc_ilen += 2;
+ m->m_len += 2;
+ }
+ }
+ if (sc->sc_ilen == 1 && c != PPP_UI) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: missing UI (0x3), got 0x%x\n",
+ sc->sc_if.if_unit, c);
+ goto flush;
+ }
+ if (sc->sc_ilen == 2 && (c & 1) == 1) {
+ /* a compressed protocol */
+ *sc->sc_mp++ = 0;
+ sc->sc_ilen++;
+ sc->sc_mc->m_len++;
+ }
+ if (sc->sc_ilen == 3 && (c & 1) == 0) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: bad protocol %x\n", sc->sc_if.if_unit,
+ (sc->sc_mp[-1] << 8) + c);
+ goto flush;
+ }
+
+ /* packet beyond configured mru? */
+ if (++sc->sc_ilen > sc->sc_mru + PPP_HDRLEN + PPP_FCSLEN) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: packet too big\n", sc->sc_if.if_unit);
+ goto flush;
+ }
+
+ /* is this mbuf full? */
+ m = sc->sc_mc;
+ if (M_TRAILINGSPACE(m) <= 0) {
+ if (m->m_next == NULL) {
+ pppgetm(sc);
+ if (m->m_next == NULL) {
+ if (sc->sc_flags & SC_DEBUG)
+ printf("ppp%d: too few input mbufs!\n", sc->sc_if.if_unit);
+ goto flush;
+ }
+ }
+ sc->sc_mc = m = m->m_next;
+ m->m_len = 0;
+ m->m_data = M_DATASTART(m);
+ sc->sc_mp = mtod(m, char *);
+ }
+
+ ++m->m_len;
+ *sc->sc_mp++ = c;
+ sc->sc_fcs = PPP_FCS(sc->sc_fcs, c);
+ return 0;
+
+ flush:
+ if (!(sc->sc_flags & SC_FLUSH)) {
+ s = spltty();
+ sc->sc_if.if_ierrors++;
+ sc->sc_stats.ppp_ierrors++;
+ sc->sc_flags |= SC_FLUSH;
+ splx(s);
+ if (sc->sc_flags & SC_LOG_FLUSH)
+ ppplogchar(sc, c);
+ }
+ return 0;
+}
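
pppinput() accepts frames whose 0xff 0x03 address/control bytes and/or high protocol
byte have been compressed away, and re-inserts them so the rest of the stack always sees
a full four-byte header. Below is a standalone restatement of that normalization; the
function name and layout are hypothetical, not part of this driver.

    #include <stdio.h>

    /* Rebuild a full PPP header from a possibly compressed one, mirroring
     * the checks pppinput() applies at sc_ilen == 0, 1, 2 and 3. */
    static int
    ppp_normalize_header(const unsigned char *in, int len, unsigned char *out)
    {
        int i = 0, o = 0;

        if (len < 1)
            return -1;
        if (in[0] == 0xff) {                    /* PPP_ALLSTATIONS */
            if (len < 2 || in[1] != 0x03)       /* PPP_UI must follow */
                return -1;
            out[o++] = in[i++];
            out[o++] = in[i++];
        } else {                                /* address/control compressed */
            out[o++] = 0xff;
            out[o++] = 0x03;
        }
        if (i >= len)
            return -1;
        if (in[i] & 1) {                        /* protocol field compressed */
            out[o++] = 0;
            out[o++] = in[i++];
        } else {
            if (i + 1 >= len || (in[i + 1] & 1) == 0)
                return -1;                      /* bad protocol */
            out[o++] = in[i++];
            out[o++] = in[i++];
        }
        return o;                               /* always 4 on success */
    }

    int
    main(void)
    {
        unsigned char frame[] = { 0x21, 0x45, 0x00 };   /* fully compressed IP frame start */
        unsigned char hdr[4];

        /* prints "ff 03 00 21" */
        if (ppp_normalize_header(frame, sizeof frame, hdr) == 4)
            printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
        return 0;
    }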
+
+#define MAX_DUMP_BYTES 128
+
+static void
+ppplogchar(sc, c)
+ struct ppp_softc *sc;
+ int c;
+{
+ if (c >= 0)
+ sc->sc_rawin[sc->sc_rawin_count++] = c;
+ if (sc->sc_rawin_count >= sizeof(sc->sc_rawin)
+ || (c < 0 && sc->sc_rawin_count > 0)) {
+ printf("ppp%d input: %*D", sc->sc_if.if_unit,
+ sc->sc_rawin_count, sc->sc_rawin, " ");
+ sc->sc_rawin_count = 0;
+ }
+}
+
+#endif /* NPPP > 0 */
diff --git a/sys/net/radix.c b/sys/net/radix.c
new file mode 100644
index 0000000..55887eb
--- /dev/null
+++ b/sys/net/radix.c
@@ -0,0 +1,1028 @@
+/*
+ * Copyright (c) 1988, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)radix.c 8.4 (Berkeley) 11/2/94
+ * $FreeBSD$
+ */
+
+/*
+ * Routines to build and maintain radix trees for routing lookups.
+ */
+#ifndef _RADIX_H_
+#include <sys/param.h>
+#ifdef KERNEL
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#define M_DONTWAIT M_NOWAIT
+#include <sys/domain.h>
+#else
+#include <stdlib.h>
+#endif
+#include <sys/syslog.h>
+#include <net/radix.h>
+#endif
+
+static int rn_walktree_from __P((struct radix_node_head *h, void *a,
+ void *m, walktree_f_t *f, void *w));
+static int rn_walktree __P((struct radix_node_head *, walktree_f_t *, void *));
+static struct radix_node
+ *rn_insert __P((void *, struct radix_node_head *, int *,
+ struct radix_node [2])),
+ *rn_newpair __P((void *, int, struct radix_node[2])),
+ *rn_search __P((void *, struct radix_node *)),
+ *rn_search_m __P((void *, struct radix_node *, void *));
+
+static int max_keylen;
+static struct radix_mask *rn_mkfreelist;
+static struct radix_node_head *mask_rnhead;
+static char *addmask_key;
+static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
+static char *rn_zeros, *rn_ones;
+
+#define rn_masktop (mask_rnhead->rnh_treetop)
+#undef Bcmp
+#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
+
+static int rn_lexobetter __P((void *m_arg, void *n_arg));
+static struct radix_mask *
+ rn_new_radix_mask __P((struct radix_node *tt,
+ struct radix_mask *next));
+static int rn_satsifies_leaf __P((char *trial, struct radix_node *leaf,
+ int skip));
+
+/*
+ * The data structure for the keys is a radix tree with one way
+ * branching removed. The index rn_b at an internal node n represents a bit
+ * position to be tested. The tree is arranged so that all descendants
+ * of a node n have keys whose bits all agree up to position rn_b - 1.
+ * (We say the index of n is rn_b.)
+ *
+ * There is at least one descendant which has a one bit at position rn_b,
+ * and at least one with a zero there.
+ *
+ * A route is determined by a pair of key and mask. We require that the
+ * bit-wise logical AND of the key and mask be the key.
+ * We define the index of the route associated with the mask to be
+ * the first bit number in the mask where 0 occurs (with bit number 0
+ * representing the highest order bit).
+ *
+ * We say a mask is normal if every bit past the index of the mask is 0.
+ * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
+ * and m is a normal mask, then the route applies to every descendant of n.
+ * If index(m) < rn_b, this implies that the trailing bits of k
+ * before bit rn_b are all 0 (and hence the same is true of every descendant
+ * of n), so the route applies to all descendants of the node as well.
+ *
+ * Similar logic shows that a non-normal mask m such that
+ * index(m) <= index(n) could potentially apply to many children of n.
+ * Thus, for each non-host route, we attach its mask to a list at an internal
+ * node as high in the tree as we can go.
+ *
+ * The present version of the code makes use of normal routes in short-
+ * circuiting an explicit mask and compare operation when testing whether
+ * a key satisfies a normal route, and also in remembering the unique leaf
+ * that governs a subtree.
+ */
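
Concretely, the "index" of a mask is the bit number of its first 0 bit, counting from
the high-order bit of the key, so a contiguous /24 IPv4 mask has index 24 (plus eight
times however many header bytes the tree is configured to skip); a leaf stores this as
rn_b = -1 - index. A small sketch using plain byte arrays rather than the
length-prefixed keys the kernel passes around:

    #include <stdio.h>

    /* Index of a mask: first bit position (0 = high-order bit) that is 0. */
    static int
    mask_index(const unsigned char *mask, int len)
    {
        int i, b;

        for (i = 0; i < len; i++) {
            if (mask[i] == 0xff)
                continue;
            for (b = 0; b < 8; b++)
                if ((mask[i] & (0x80 >> b)) == 0)
                    return i * 8 + b;
        }
        return len * 8;         /* all ones */
    }

    int
    main(void)
    {
        unsigned char slash24[] = { 0xff, 0xff, 0xff, 0x00 };
        unsigned char slash20[] = { 0xff, 0xff, 0xf0, 0x00 };

        /* prints "24 20" */
        printf("%d %d\n", mask_index(slash24, 4), mask_index(slash20, 4));
        return 0;
    }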
+
+static struct radix_node *
+rn_search(v_arg, head)
+ void *v_arg;
+ struct radix_node *head;
+{
+ register struct radix_node *x;
+ register caddr_t v;
+
+ for (x = head, v = v_arg; x->rn_b >= 0;) {
+ if (x->rn_bmask & v[x->rn_off])
+ x = x->rn_r;
+ else
+ x = x->rn_l;
+ }
+ return (x);
+}
+
+static struct radix_node *
+rn_search_m(v_arg, head, m_arg)
+ struct radix_node *head;
+ void *v_arg, *m_arg;
+{
+ register struct radix_node *x;
+ register caddr_t v = v_arg, m = m_arg;
+
+ for (x = head; x->rn_b >= 0;) {
+ if ((x->rn_bmask & m[x->rn_off]) &&
+ (x->rn_bmask & v[x->rn_off]))
+ x = x->rn_r;
+ else
+ x = x->rn_l;
+ }
+ return x;
+}
+
+int
+rn_refines(m_arg, n_arg)
+ void *m_arg, *n_arg;
+{
+ register caddr_t m = m_arg, n = n_arg;
+ register caddr_t lim, lim2 = lim = n + *(u_char *)n;
+ int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
+ int masks_are_equal = 1;
+
+ if (longer > 0)
+ lim -= longer;
+ while (n < lim) {
+ if (*n & ~(*m))
+ return 0;
+ if (*n++ != *m++)
+ masks_are_equal = 0;
+ }
+ while (n < lim2)
+ if (*n++)
+ return 0;
+ if (masks_are_equal && (longer < 0))
+ for (lim2 = m - longer; m < lim2; )
+ if (*m++)
+ return 1;
+ return (!masks_are_equal);
+}
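
rn_refines(m, n) answers "is mask m strictly more specific than mask n?" for the
length-prefixed masks the tree stores (the first byte is the total key length, including
itself): every bit set in n must also be set in m, and the two must differ. A hedged
userland illustration with hand-built keys, assuming radix.c has been compiled with its
non-KERNEL path and linked in:

    #include <stdio.h>

    int rn_refines(void *m_arg, void *n_arg);   /* prototype as in radix.h */

    int
    main(void)
    {
        /* byte 0 is the length of the whole key, including itself */
        unsigned char slash24[] = { 5, 0xff, 0xff, 0xff, 0x00 };
        unsigned char slash16[] = { 5, 0xff, 0xff, 0x00, 0x00 };

        printf("%d\n", rn_refines(slash24, slash16));   /* 1: /24 refines /16 */
        printf("%d\n", rn_refines(slash16, slash24));   /* 0: /16 is less specific */
        printf("%d\n", rn_refines(slash24, slash24));   /* 0: equal masks */
        return 0;
    }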
+
+struct radix_node *
+rn_lookup(v_arg, m_arg, head)
+ void *v_arg, *m_arg;
+ struct radix_node_head *head;
+{
+ register struct radix_node *x;
+ caddr_t netmask = 0;
+
+ if (m_arg) {
+ if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
+ return (0);
+ netmask = x->rn_key;
+ }
+ x = rn_match(v_arg, head);
+ if (x && netmask) {
+ while (x && x->rn_mask != netmask)
+ x = x->rn_dupedkey;
+ }
+ return x;
+}
+
+static int
+rn_satsifies_leaf(trial, leaf, skip)
+ char *trial;
+ register struct radix_node *leaf;
+ int skip;
+{
+ register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
+ char *cplim;
+ int length = min(*(u_char *)cp, *(u_char *)cp2);
+
+ if (cp3 == 0)
+ cp3 = rn_ones;
+ else
+ length = min(length, *(u_char *)cp3);
+ cplim = cp + length; cp3 += skip; cp2 += skip;
+ for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
+ if ((*cp ^ *cp2) & *cp3)
+ return 0;
+ return 1;
+}
+
+struct radix_node *
+rn_match(v_arg, head)
+ void *v_arg;
+ struct radix_node_head *head;
+{
+ caddr_t v = v_arg;
+ register struct radix_node *t = head->rnh_treetop, *x;
+ register caddr_t cp = v, cp2;
+ caddr_t cplim;
+ struct radix_node *saved_t, *top = t;
+ int off = t->rn_off, vlen = *(u_char *)cp, matched_off;
+ register int test, b, rn_b;
+
+ /*
+ * Open code rn_search(v, top) to avoid overhead of extra
+ * subroutine call.
+ */
+ for (; t->rn_b >= 0; ) {
+ if (t->rn_bmask & cp[t->rn_off])
+ t = t->rn_r;
+ else
+ t = t->rn_l;
+ }
+ /*
+ * See if we match exactly as a host destination
+ * or at least learn how many bits match, for normal mask finesse.
+ *
+ * It doesn't hurt us to limit how many bytes to check
+ * to the length of the mask, since if it matches we had a genuine
+ * match and the leaf we have is the most specific one anyway;
+ * if it didn't match with a shorter length it would fail
+ * with a long one. This wins big for class B&C netmasks which
+ * are probably the most common case...
+ */
+ if (t->rn_mask)
+ vlen = *(u_char *)t->rn_mask;
+ cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
+ for (; cp < cplim; cp++, cp2++)
+ if (*cp != *cp2)
+ goto on1;
+ /*
+ * This extra grot is in case we are explicitly asked
+ * to look up the default. Ugh!
+ *
+ * Never return the root node itself; it seems to cause a
+ * lot of confusion.
+ */
+ if (t->rn_flags & RNF_ROOT)
+ t = t->rn_dupedkey;
+ return t;
+on1:
+ test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
+ for (b = 7; (test >>= 1) > 0;)
+ b--;
+ matched_off = cp - v;
+ b += matched_off << 3;
+ rn_b = -1 - b;
+ /*
+ * If there is a host route in a duped-key chain, it will be first.
+ */
+ if ((saved_t = t)->rn_mask == 0)
+ t = t->rn_dupedkey;
+ for (; t; t = t->rn_dupedkey)
+ /*
+ * Even if we don't match exactly as a host,
+ * we may match if the leaf we wound up at is
+ * a route to a net.
+ */
+ if (t->rn_flags & RNF_NORMAL) {
+ if (rn_b <= t->rn_b)
+ return t;
+ } else if (rn_satsifies_leaf(v, t, matched_off))
+ return t;
+ t = saved_t;
+ /* start searching up the tree */
+ do {
+ register struct radix_mask *m;
+ t = t->rn_p;
+ m = t->rn_mklist;
+ if (m) {
+ /*
+ * If non-contiguous masks ever become important
+ * we can restore the masking and open coding of
+ * the search and satisfaction test and put the
+ * calculation of "off" back before the "do".
+ */
+ do {
+ if (m->rm_flags & RNF_NORMAL) {
+ if (rn_b <= m->rm_b)
+ return (m->rm_leaf);
+ } else {
+ off = min(t->rn_off, matched_off);
+ x = rn_search_m(v, t, m->rm_mask);
+ while (x && x->rn_mask != m->rm_mask)
+ x = x->rn_dupedkey;
+ if (x && rn_satsifies_leaf(v, x, off))
+ return x;
+ }
+ m = m->rm_mklist;
+ } while (m);
+ }
+ } while (t != top);
+ return 0;
+}
+
+#ifdef RN_DEBUG
+int rn_nodenum;
+struct radix_node *rn_clist;
+int rn_saveinfo;
+int rn_debug = 1;
+#endif
+
+static struct radix_node *
+rn_newpair(v, b, nodes)
+ void *v;
+ int b;
+ struct radix_node nodes[2];
+{
+ register struct radix_node *tt = nodes, *t = tt + 1;
+ t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
+ t->rn_l = tt; t->rn_off = b >> 3;
+ tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t;
+ tt->rn_flags = t->rn_flags = RNF_ACTIVE;
+#ifdef RN_DEBUG
+ tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
+ tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
+#endif
+ return t;
+}
+
+static struct radix_node *
+rn_insert(v_arg, head, dupentry, nodes)
+ void *v_arg;
+ struct radix_node_head *head;
+ int *dupentry;
+ struct radix_node nodes[2];
+{
+ caddr_t v = v_arg;
+ struct radix_node *top = head->rnh_treetop;
+ int head_off = top->rn_off, vlen = (int)*((u_char *)v);
+ register struct radix_node *t = rn_search(v_arg, top);
+ register caddr_t cp = v + head_off;
+ register int b;
+ struct radix_node *tt;
+ /*
+ * Find first bit at which v and t->rn_key differ
+ */
+ {
+ register caddr_t cp2 = t->rn_key + head_off;
+ register int cmp_res;
+ caddr_t cplim = v + vlen;
+
+ while (cp < cplim)
+ if (*cp2++ != *cp++)
+ goto on1;
+ *dupentry = 1;
+ return t;
+on1:
+ *dupentry = 0;
+ cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
+ for (b = (cp - v) << 3; cmp_res; b--)
+ cmp_res >>= 1;
+ }
+ {
+ register struct radix_node *p, *x = top;
+ cp = v;
+ do {
+ p = x;
+ if (cp[x->rn_off] & x->rn_bmask)
+ x = x->rn_r;
+ else x = x->rn_l;
+ } while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
+#ifdef RN_DEBUG
+ if (rn_debug)
+ log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
+#endif
+ t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
+ if ((cp[p->rn_off] & p->rn_bmask) == 0)
+ p->rn_l = t;
+ else
+ p->rn_r = t;
+ x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
+ if ((cp[t->rn_off] & t->rn_bmask) == 0) {
+ t->rn_r = x;
+ } else {
+ t->rn_r = tt; t->rn_l = x;
+ }
+#ifdef RN_DEBUG
+ if (rn_debug)
+ log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
+#endif
+ }
+ return (tt);
+}
+
+struct radix_node *
+rn_addmask(n_arg, search, skip)
+ int search, skip;
+ void *n_arg;
+{
+ caddr_t netmask = (caddr_t)n_arg;
+ register struct radix_node *x;
+ register caddr_t cp, cplim;
+ register int b = 0, mlen, j;
+ int maskduplicated, m0, isnormal;
+ struct radix_node *saved_x;
+ static int last_zeroed = 0;
+
+ if ((mlen = *(u_char *)netmask) > max_keylen)
+ mlen = max_keylen;
+ if (skip == 0)
+ skip = 1;
+ if (mlen <= skip)
+ return (mask_rnhead->rnh_nodes);
+ if (skip > 1)
+ Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
+ if ((m0 = mlen) > skip)
+ Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
+ /*
+ * Trim trailing zeroes.
+ */
+ for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
+ cp--;
+ mlen = cp - addmask_key;
+ if (mlen <= skip) {
+ if (m0 >= last_zeroed)
+ last_zeroed = mlen;
+ return (mask_rnhead->rnh_nodes);
+ }
+ if (m0 < last_zeroed)
+ Bzero(addmask_key + m0, last_zeroed - m0);
+ *addmask_key = last_zeroed = mlen;
+ x = rn_search(addmask_key, rn_masktop);
+ if (Bcmp(addmask_key, x->rn_key, mlen) != 0)
+ x = 0;
+ if (x || search)
+ return (x);
+ R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
+ if ((saved_x = x) == 0)
+ return (0);
+ Bzero(x, max_keylen + 2 * sizeof (*x));
+ netmask = cp = (caddr_t)(x + 2);
+ Bcopy(addmask_key, cp, mlen);
+ x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
+ if (maskduplicated) {
+ log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
+ Free(saved_x);
+ return (x);
+ }
+ /*
+ * Calculate index of mask, and check for normalcy.
+ */
+ cplim = netmask + mlen; isnormal = 1;
+ for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
+ cp++;
+ if (cp != cplim) {
+ for (j = 0x80; (j & *cp) != 0; j >>= 1)
+ b++;
+ if (*cp != normal_chars[b] || cp != (cplim - 1))
+ isnormal = 0;
+ }
+ b += (cp - netmask) << 3;
+ x->rn_b = -1 - b;
+ if (isnormal)
+ x->rn_flags |= RNF_NORMAL;
+ return (x);
+}
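
The normality test at the end of rn_addmask() leans on normal_chars[]: once trailing
zero bytes have been trimmed, a contiguous ("normal") mask is a run of 0xff bytes
optionally followed by exactly one byte from that table, and nothing after it. A
standalone restatement of the check (illustrative, not the kernel's code path):

    #include <stdio.h>

    static const unsigned char normal_chars[] =
        { 0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff };

    /* Is a mask (with trailing zero bytes already trimmed) contiguous? */
    static int
    mask_is_normal(const unsigned char *mask, int len)
    {
        int i = 0, b;

        while (i < len && mask[i] == 0xff)
            i++;
        if (i == len)
            return 1;                   /* all ones */
        if (i != len - 1)
            return 0;                   /* something follows the partial byte */
        for (b = 0; b < 9; b++)
            if (mask[i] == normal_chars[b])
                return 1;
        return 0;
    }

    int
    main(void)
    {
        unsigned char good[] = { 0xff, 0xff, 0xf0 };    /* a /20, trimmed */
        unsigned char bad[]  = { 0xff, 0x0f, 0xff };    /* non-contiguous */

        /* prints "1 0" */
        printf("%d %d\n", mask_is_normal(good, 3), mask_is_normal(bad, 3));
        return 0;
    }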
+
+static int /* XXX: arbitrary ordering for non-contiguous masks */
+rn_lexobetter(m_arg, n_arg)
+ void *m_arg, *n_arg;
+{
+ register u_char *mp = m_arg, *np = n_arg, *lim;
+
+ if (*mp > *np)
+ return 1; /* not really, but need to check longer one first */
+ if (*mp == *np)
+ for (lim = mp + *mp; mp < lim;)
+ if (*mp++ > *np++)
+ return 1;
+ return 0;
+}
+
+static struct radix_mask *
+rn_new_radix_mask(tt, next)
+ register struct radix_node *tt;
+ register struct radix_mask *next;
+{
+ register struct radix_mask *m;
+
+ MKGet(m);
+ if (m == 0) {
+ log(LOG_ERR, "Mask for route not entered\n");
+ return (0);
+ }
+ Bzero(m, sizeof *m);
+ m->rm_b = tt->rn_b;
+ m->rm_flags = tt->rn_flags;
+ if (tt->rn_flags & RNF_NORMAL)
+ m->rm_leaf = tt;
+ else
+ m->rm_mask = tt->rn_mask;
+ m->rm_mklist = next;
+ tt->rn_mklist = m;
+ return m;
+}
+
+struct radix_node *
+rn_addroute(v_arg, n_arg, head, treenodes)
+ void *v_arg, *n_arg;
+ struct radix_node_head *head;
+ struct radix_node treenodes[2];
+{
+ caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
+ register struct radix_node *t, *x = 0, *tt;
+ struct radix_node *saved_tt, *top = head->rnh_treetop;
+ short b = 0, b_leaf = 0;
+ int keyduplicated;
+ caddr_t mmask;
+ struct radix_mask *m, **mp;
+
+ /*
+ * In dealing with non-contiguous masks, there may be
+ * many different routes which have the same mask.
+ * We will find it useful to have a unique pointer to
+ * the mask, which helps us avoid duplicate references at
+ * nodes and can save time in calculating indices.
+ */
+ if (netmask) {
+ if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
+ return (0);
+ b_leaf = x->rn_b;
+ b = -1 - x->rn_b;
+ netmask = x->rn_key;
+ }
+ /*
+ * Deal with duplicated keys: attach node to previous instance
+ */
+ saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
+ if (keyduplicated) {
+ for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
+ if (tt->rn_mask == netmask)
+ return (0);
+ if (netmask == 0 ||
+ (tt->rn_mask &&
+ ((b_leaf < tt->rn_b) || /* index(netmask) > node */
+ rn_refines(netmask, tt->rn_mask) ||
+ rn_lexobetter(netmask, tt->rn_mask))))
+ break;
+ }
+ /*
+ * If the mask is not duplicated, we wouldn't
+ * find it among possible duplicate key entries
+ * anyway, so the above test doesn't hurt.
+ *
+ * We sort the masks for a duplicated key the same way as
+ * in a masklist -- most specific to least specific.
+ * This may require the unfortunate nuisance of relocating
+ * the head of the list.
+ */
+ if (tt == saved_tt) {
+ struct radix_node *xx = x;
+ /* link in at head of list */
+ (tt = treenodes)->rn_dupedkey = t;
+ tt->rn_flags = t->rn_flags;
+ tt->rn_p = x = t->rn_p;
+ t->rn_p = tt; /* parent */
+ if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
+ saved_tt = tt; x = xx;
+ } else {
+ (tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
+ t->rn_dupedkey = tt;
+ tt->rn_p = t; /* parent */
+ if (tt->rn_dupedkey) /* parent */
+ tt->rn_dupedkey->rn_p = tt; /* parent */
+ }
+#ifdef RN_DEBUG
+ t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
+ tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
+#endif
+ tt->rn_key = (caddr_t) v;
+ tt->rn_b = -1;
+ tt->rn_flags = RNF_ACTIVE;
+ }
+ /*
+ * Put mask in tree.
+ */
+ if (netmask) {
+ tt->rn_mask = netmask;
+ tt->rn_b = x->rn_b;
+ tt->rn_flags |= x->rn_flags & RNF_NORMAL;
+ }
+ t = saved_tt->rn_p;
+ if (keyduplicated)
+ goto on2;
+ b_leaf = -1 - t->rn_b;
+ if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
+ /* Promote general routes from below */
+ if (x->rn_b < 0) {
+ for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
+ if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
+ *mp = m = rn_new_radix_mask(x, 0);
+ if (m)
+ mp = &m->rm_mklist;
+ }
+ } else if (x->rn_mklist) {
+ /*
+ * Skip over masks whose index is > that of new node
+ */
+ for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
+ if (m->rm_b >= b_leaf)
+ break;
+ t->rn_mklist = m; *mp = 0;
+ }
+on2:
+ /* Add new route to highest possible ancestor's list */
+ if ((netmask == 0) || (b > t->rn_b ))
+ return tt; /* can't lift at all */
+ b_leaf = tt->rn_b;
+ do {
+ x = t;
+ t = t->rn_p;
+ } while (b <= t->rn_b && x != top);
+ /*
+ * Search through routes associated with node to
+ * insert new route according to index.
+ * Need same criteria as when sorting dupedkeys to avoid
+ * double loop on deletion.
+ */
+ for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
+ if (m->rm_b < b_leaf)
+ continue;
+ if (m->rm_b > b_leaf)
+ break;
+ if (m->rm_flags & RNF_NORMAL) {
+ mmask = m->rm_leaf->rn_mask;
+ if (tt->rn_flags & RNF_NORMAL) {
+ log(LOG_ERR,
+ "Non-unique normal route, mask not entered");
+ return tt;
+ }
+ } else
+ mmask = m->rm_mask;
+ if (mmask == netmask) {
+ m->rm_refs++;
+ tt->rn_mklist = m;
+ return tt;
+ }
+ if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
+ break;
+ }
+ *mp = rn_new_radix_mask(tt, *mp);
+ return tt;
+}
+
+struct radix_node *
+rn_delete(v_arg, netmask_arg, head)
+ void *v_arg, *netmask_arg;
+ struct radix_node_head *head;
+{
+ register struct radix_node *t, *p, *x, *tt;
+ struct radix_mask *m, *saved_m, **mp;
+ struct radix_node *dupedkey, *saved_tt, *top;
+ caddr_t v, netmask;
+ int b, head_off, vlen;
+
+ v = v_arg;
+ netmask = netmask_arg;
+ x = head->rnh_treetop;
+ tt = rn_search(v, x);
+ head_off = x->rn_off;
+ vlen = *(u_char *)v;
+ saved_tt = tt;
+ top = x;
+ if (tt == 0 ||
+ Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
+ return (0);
+ /*
+ * Delete our route from mask lists.
+ */
+ if (netmask) {
+ if ((x = rn_addmask(netmask, 1, head_off)) == 0)
+ return (0);
+ netmask = x->rn_key;
+ while (tt->rn_mask != netmask)
+ if ((tt = tt->rn_dupedkey) == 0)
+ return (0);
+ }
+ if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
+ goto on1;
+ if (tt->rn_flags & RNF_NORMAL) {
+ if (m->rm_leaf != tt || m->rm_refs > 0) {
+ log(LOG_ERR, "rn_delete: inconsistent annotation\n");
+ return 0; /* dangling ref could cause disaster */
+ }
+ } else {
+ if (m->rm_mask != tt->rn_mask) {
+ log(LOG_ERR, "rn_delete: inconsistent annotation\n");
+ goto on1;
+ }
+ if (--m->rm_refs >= 0)
+ goto on1;
+ }
+ b = -1 - tt->rn_b;
+ t = saved_tt->rn_p;
+ if (b > t->rn_b)
+ goto on1; /* Wasn't lifted at all */
+ do {
+ x = t;
+ t = t->rn_p;
+ } while (b <= t->rn_b && x != top);
+ for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
+ if (m == saved_m) {
+ *mp = m->rm_mklist;
+ MKFree(m);
+ break;
+ }
+ if (m == 0) {
+ log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
+ if (tt->rn_flags & RNF_NORMAL)
+ return (0); /* Dangling ref to us */
+ }
+on1:
+ /*
+ * Eliminate us from tree
+ */
+ if (tt->rn_flags & RNF_ROOT)
+ return (0);
+#ifdef RN_DEBUG
+ /* Get us out of the creation list */
+ for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
+ if (t) t->rn_ybro = tt->rn_ybro;
+#endif
+ t = tt->rn_p;
+ dupedkey = saved_tt->rn_dupedkey;
+ if (dupedkey) {
+ /*
+ * at this point, tt is the deletion target and saved_tt
+ * is the head of the dupekey chain
+ */
+ if (tt == saved_tt) {
+ /* remove from head of chain */
+ x = dupedkey; x->rn_p = t;
+ if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
+ } else {
+ /* find node in front of tt on the chain */
+ for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
+ p = p->rn_dupedkey;
+ if (p) {
+ p->rn_dupedkey = tt->rn_dupedkey;
+ if (tt->rn_dupedkey) /* parent */
+ tt->rn_dupedkey->rn_p = p; /* parent */
+ } else log(LOG_ERR, "rn_delete: couldn't find us\n");
+ }
+ t = tt + 1;
+ if (t->rn_flags & RNF_ACTIVE) {
+#ifndef RN_DEBUG
+ *++x = *t; p = t->rn_p;
+#else
+ b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
+#endif
+ if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
+ x->rn_l->rn_p = x; x->rn_r->rn_p = x;
+ }
+ goto out;
+ }
+ if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
+ p = t->rn_p;
+ if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
+ x->rn_p = p;
+ /*
+ * Demote routes attached to us.
+ */
+ if (t->rn_mklist) {
+ if (x->rn_b >= 0) {
+ for (mp = &x->rn_mklist; (m = *mp);)
+ mp = &m->rm_mklist;
+ *mp = t->rn_mklist;
+ } else {
+ /* If there are any key,mask pairs in a sibling
+ duped-key chain, some subset will appear sorted
+ in the same order attached to our mklist */
+ for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
+ if (m == x->rn_mklist) {
+ struct radix_mask *mm = m->rm_mklist;
+ x->rn_mklist = 0;
+ if (--(m->rm_refs) < 0)
+ MKFree(m);
+ m = mm;
+ }
+ if (m)
+ log(LOG_ERR,
+ "rn_delete: Orphaned Mask %p at %p\n",
+ (void *)m, (void *)x);
+ }
+ }
+ /*
+ * We may be holding an active internal node in the tree.
+ */
+ x = tt + 1;
+ if (t != x) {
+#ifndef RN_DEBUG
+ *t = *x;
+#else
+ b = t->rn_info; *t = *x; t->rn_info = b;
+#endif
+ t->rn_l->rn_p = t; t->rn_r->rn_p = t;
+ p = x->rn_p;
+ if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
+ }
+out:
+ tt->rn_flags &= ~RNF_ACTIVE;
+ tt[1].rn_flags &= ~RNF_ACTIVE;
+ return (tt);
+}
+
+/*
+ * This is the same as rn_walktree() except for the parameters and the
+ * exit.
+ */
+static int
+rn_walktree_from(h, a, m, f, w)
+ struct radix_node_head *h;
+ void *a, *m;
+ walktree_f_t *f;
+ void *w;
+{
+ int error;
+ struct radix_node *base, *next;
+ u_char *xa = (u_char *)a;
+ u_char *xm = (u_char *)m;
+ register struct radix_node *rn, *last = 0 /* shut up gcc */;
+ int stopping = 0;
+ int lastb;
+
+ /*
+ * rn_search_m is sort-of-open-coded here.
+ */
+ /* printf("about to search\n"); */
+ for (rn = h->rnh_treetop; rn->rn_b >= 0; ) {
+ last = rn;
+ /* printf("rn_b %d, rn_bmask %x, xm[rn_off] %x\n",
+ rn->rn_b, rn->rn_bmask, xm[rn->rn_off]); */
+ if (!(rn->rn_bmask & xm[rn->rn_off])) {
+ break;
+ }
+ if (rn->rn_bmask & xa[rn->rn_off]) {
+ rn = rn->rn_r;
+ } else {
+ rn = rn->rn_l;
+ }
+ }
+ /* printf("done searching\n"); */
+
+ /*
+ * Two cases: either we stepped off the end of our mask,
+ * in which case last == rn, or we reached a leaf, in which
+ * case we want to start from the last node we looked at.
+ * Either way, last is the node we want to start from.
+ */
+ rn = last;
+ lastb = rn->rn_b;
+
+ /* printf("rn %p, lastb %d\n", rn, lastb);*/
+
+ /*
+ * This gets complicated because we may delete the node
+ * while applying the function f to it, so we need to calculate
+ * the successor node in advance.
+ */
+ while (rn->rn_b >= 0)
+ rn = rn->rn_l;
+
+ while (!stopping) {
+ /* printf("node %p (%d)\n", rn, rn->rn_b); */
+ base = rn;
+ /* If at right child go back up, otherwise, go right */
+ while (rn->rn_p->rn_r == rn && !(rn->rn_flags & RNF_ROOT)) {
+ rn = rn->rn_p;
+
+ /* if went up beyond last, stop */
+ if (rn->rn_b < lastb) {
+ stopping = 1;
+ /* printf("up too far\n"); */
+ }
+ }
+
+ /* Find the next *leaf* since next node might vanish, too */
+ for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
+ rn = rn->rn_l;
+ next = rn;
+ /* Process leaves */
+ while ((rn = base) != 0) {
+ base = rn->rn_dupedkey;
+ /* printf("leaf %p\n", rn); */
+ if (!(rn->rn_flags & RNF_ROOT)
+ && (error = (*f)(rn, w)))
+ return (error);
+ }
+ rn = next;
+
+ if (rn->rn_flags & RNF_ROOT) {
+ /* printf("root, stopping"); */
+ stopping = 1;
+ }
+
+ }
+ return 0;
+}
+
+static int
+rn_walktree(h, f, w)
+ struct radix_node_head *h;
+ walktree_f_t *f;
+ void *w;
+{
+ int error;
+ struct radix_node *base, *next;
+ register struct radix_node *rn = h->rnh_treetop;
+ /*
+ * This gets complicated because we may delete the node
+ * while applying the function f to it, so we need to calculate
+ * the successor node in advance.
+ */
+ /* First time through node, go left */
+ while (rn->rn_b >= 0)
+ rn = rn->rn_l;
+ for (;;) {
+ base = rn;
+ /* If at right child go back up, otherwise, go right */
+ while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
+ rn = rn->rn_p;
+ /* Find the next *leaf* since next node might vanish, too */
+ for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
+ rn = rn->rn_l;
+ next = rn;
+ /* Process leaves */
+ while ((rn = base)) {
+ base = rn->rn_dupedkey;
+ if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
+ return (error);
+ }
+ rn = next;
+ if (rn->rn_flags & RNF_ROOT)
+ return (0);
+ }
+ /* NOTREACHED */
+}
+
+int
+rn_inithead(head, off)
+ void **head;
+ int off;
+{
+ register struct radix_node_head *rnh;
+ register struct radix_node *t, *tt, *ttt;
+ if (*head)
+ return (1);
+ R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
+ if (rnh == 0)
+ return (0);
+ Bzero(rnh, sizeof (*rnh));
+ *head = rnh;
+ t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
+ ttt = rnh->rnh_nodes + 2;
+ t->rn_r = ttt;
+ t->rn_p = t;
+ tt = t->rn_l;
+ tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
+ tt->rn_b = -1 - off;
+ *ttt = *tt;
+ ttt->rn_key = rn_ones;
+ rnh->rnh_addaddr = rn_addroute;
+ rnh->rnh_deladdr = rn_delete;
+ rnh->rnh_matchaddr = rn_match;
+ rnh->rnh_lookup = rn_lookup;
+ rnh->rnh_walktree = rn_walktree;
+ rnh->rnh_walktree_from = rn_walktree_from;
+ rnh->rnh_treetop = t;
+ return (1);
+}
+
+void
+rn_init()
+{
+ char *cp, *cplim;
+#ifdef KERNEL
+ struct domain *dom;
+
+ for (dom = domains; dom; dom = dom->dom_next)
+ if (dom->dom_maxrtkey > max_keylen)
+ max_keylen = dom->dom_maxrtkey;
+#endif
+ if (max_keylen == 0) {
+ log(LOG_ERR,
+ "rn_init: radix functions require max_keylen be set\n");
+ return;
+ }
+ R_Malloc(rn_zeros, char *, 3 * max_keylen);
+ if (rn_zeros == NULL)
+ panic("rn_init");
+ Bzero(rn_zeros, 3 * max_keylen);
+ rn_ones = cp = rn_zeros + max_keylen;
+ addmask_key = cplim = rn_ones + max_keylen;
+ while (cp < cplim)
+ *cp++ = -1;
+ if (rn_inithead((void **)&mask_rnhead, 0) == 0)
+ panic("rn_init 2");
+}
diff --git a/sys/net/radix.h b/sys/net/radix.h
new file mode 100644
index 0000000..f68f67a
--- /dev/null
+++ b/sys/net/radix.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 1988, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)radix.h 8.2 (Berkeley) 10/31/94
+ * $FreeBSD$
+ */
+
+#ifndef _RADIX_H_
+#define _RADIX_H_
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_RTABLE);
+#endif
+
+/*
+ * Radix search tree node layout.
+ */
+
+struct radix_node {
+ struct radix_mask *rn_mklist; /* list of masks contained in subtree */
+ struct radix_node *rn_p; /* parent */
+ short rn_b; /* bit offset; -1-index(netmask) */
+ char rn_bmask; /* node: mask for bit test*/
+ u_char rn_flags; /* enumerated next */
+#define RNF_NORMAL 1 /* leaf contains normal route */
+#define RNF_ROOT 2 /* leaf is root leaf for tree */
+#define RNF_ACTIVE 4 /* This node is alive (for rtfree) */
+ union {
+ struct { /* leaf only data: */
+ caddr_t rn_Key; /* object of search */
+ caddr_t rn_Mask; /* netmask, if present */
+ struct radix_node *rn_Dupedkey;
+ } rn_leaf;
+ struct { /* node only data: */
+ int rn_Off; /* where to start compare */
+ struct radix_node *rn_L;/* progeny */
+ struct radix_node *rn_R;/* progeny */
+ } rn_node;
+ } rn_u;
+#ifdef RN_DEBUG
+ int rn_info;
+ struct radix_node *rn_twin;
+ struct radix_node *rn_ybro;
+#endif
+};
+
+#define rn_dupedkey rn_u.rn_leaf.rn_Dupedkey
+#define rn_key rn_u.rn_leaf.rn_Key
+#define rn_mask rn_u.rn_leaf.rn_Mask
+#define rn_off rn_u.rn_node.rn_Off
+#define rn_l rn_u.rn_node.rn_L
+#define rn_r rn_u.rn_node.rn_R
+
+/*
+ * Annotations to tree concerning potential routes applying to subtrees.
+ */
+
+struct radix_mask {
+ short rm_b; /* bit offset; -1-index(netmask) */
+ char rm_unused; /* cf. rn_bmask */
+ u_char rm_flags; /* cf. rn_flags */
+ struct radix_mask *rm_mklist; /* more masks to try */
+ union {
+ caddr_t rmu_mask; /* the mask */
+ struct radix_node *rmu_leaf; /* for normal routes */
+ } rm_rmu;
+ int rm_refs; /* # of references to this struct */
+};
+
+#define rm_mask rm_rmu.rmu_mask
+#define rm_leaf rm_rmu.rmu_leaf /* extra field would make 32 bytes */
+
+#define MKGet(m) {\
+ if (rn_mkfreelist) {\
+ m = rn_mkfreelist; \
+ rn_mkfreelist = (m)->rm_mklist; \
+ } else \
+ R_Malloc(m, struct radix_mask *, sizeof (*(m))); }\
+
+#define MKFree(m) { (m)->rm_mklist = rn_mkfreelist; rn_mkfreelist = (m);}
+
+typedef int walktree_f_t __P((struct radix_node *, void *));
+
+struct radix_node_head {
+ struct radix_node *rnh_treetop;
+ int rnh_addrsize; /* permit, but don't require, fixed keys */
+ int rnh_pktsize; /* permit, but don't require, fixed keys */
+ struct radix_node *(*rnh_addaddr) /* add based on sockaddr */
+ __P((void *v, void *mask,
+ struct radix_node_head *head, struct radix_node nodes[]));
+ struct radix_node *(*rnh_addpkt) /* add based on packet hdr */
+ __P((void *v, void *mask,
+ struct radix_node_head *head, struct radix_node nodes[]));
+ struct radix_node *(*rnh_deladdr) /* remove based on sockaddr */
+ __P((void *v, void *mask, struct radix_node_head *head));
+ struct radix_node *(*rnh_delpkt) /* remove based on packet hdr */
+ __P((void *v, void *mask, struct radix_node_head *head));
+ struct radix_node *(*rnh_matchaddr) /* locate based on sockaddr */
+ __P((void *v, struct radix_node_head *head));
+ struct radix_node *(*rnh_lookup) /* locate based on sockaddr */
+ __P((void *v, void *mask, struct radix_node_head *head));
+ struct radix_node *(*rnh_matchpkt) /* locate based on packet hdr */
+ __P((void *v, struct radix_node_head *head));
+ int (*rnh_walktree) /* traverse tree */
+ __P((struct radix_node_head *head, walktree_f_t *f, void *w));
+ int (*rnh_walktree_from) /* traverse tree below a */
+ __P((struct radix_node_head *head, void *a, void *m,
+ walktree_f_t *f, void *w));
+ void (*rnh_close) /* do something when the last ref drops */
+ __P((struct radix_node *rn, struct radix_node_head *head));
+ struct radix_node rnh_nodes[3]; /* empty tree for common case */
+};
+
+#ifndef KERNEL
+#define Bcmp(a, b, n) bcmp(((char *)(a)), ((char *)(b)), (n))
+#define Bcopy(a, b, n) bcopy(((char *)(a)), ((char *)(b)), (unsigned)(n))
+#define Bzero(p, n) bzero((char *)(p), (int)(n));
+#define R_Malloc(p, t, n) (p = (t) malloc((unsigned int)(n)))
+#define Free(p) free((char *)p);
+#else
+#define Bcmp(a, b, n) bcmp(((caddr_t)(a)), ((caddr_t)(b)), (unsigned)(n))
+#define Bcopy(a, b, n) bcopy(((caddr_t)(a)), ((caddr_t)(b)), (unsigned)(n))
+#define Bzero(p, n) bzero((caddr_t)(p), (unsigned)(n));
+#define R_Malloc(p, t, n) (p = (t) malloc((unsigned long)(n), M_RTABLE, M_DONTWAIT))
+#define Free(p) free((caddr_t)p, M_RTABLE);
+#endif /*KERNEL*/
+
+void rn_init __P((void));
+int rn_inithead __P((void **, int));
+int rn_refines __P((void *, void *));
+struct radix_node
+ *rn_addmask __P((void *, int, int)),
+ *rn_addroute __P((void *, void *, struct radix_node_head *,
+ struct radix_node [2])),
+ *rn_delete __P((void *, void *, struct radix_node_head *)),
+ *rn_lookup __P((void *v_arg, void *m_arg,
+ struct radix_node_head *head)),
+ *rn_match __P((void *, struct radix_node_head *));
+
+
+#endif /* _RADIX_H_ */
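[Note] The MKGet()/MKFree() macros above cache freed radix_mask structures on rn_mkfreelist and reuse them before falling back to R_Malloc(). Below is a minimal userland sketch of that free-list idiom; the names (struct mask, mk_get, mk_free) are illustrative and not part of radix.h.

#include <stdio.h>
#include <stdlib.h>

struct mask {
    struct mask *m_next;            /* doubles as the free-list link */
};

static struct mask *mk_freelist;

static struct mask *
mk_get(void)
{
    struct mask *m;

    if (mk_freelist != NULL) {      /* reuse a cached object */
        m = mk_freelist;
        mk_freelist = m->m_next;
    } else                          /* otherwise allocate fresh */
        m = malloc(sizeof(*m));
    return (m);
}

static void
mk_free(struct mask *m)
{
    m->m_next = mk_freelist;        /* push onto the cache */
    mk_freelist = m;
}

int
main(void)
{
    struct mask *a = mk_get();
    mk_free(a);
    struct mask *b = mk_get();      /* the cached object comes back */
    printf("reused: %s\n", a == b ? "yes" : "no");
    return (0);
}

The same pattern avoids allocator traffic for small, frequently recycled objects, which is why the kernel keeps a private list instead of calling Free() every time.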
diff --git a/sys/net/raw_cb.c b/sys/net/raw_cb.c
new file mode 100644
index 0000000..97dbc10
--- /dev/null
+++ b/sys/net/raw_cb.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_cb.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+
+#include <net/raw_cb.h>
+
+/*
+ * Routines to manage the raw protocol control blocks.
+ *
+ * TODO:
+ * hash lookups by protocol family/protocol + address family
+ * take care of unique address problems per AF?
+ * redo address binding to allow wildcards
+ */
+
+struct rawcb_list_head rawcb_list;
+
+static u_long raw_sendspace = RAWSNDQ;
+static u_long raw_recvspace = RAWRCVQ;
+
+/*
+ * Allocate a control block and a nominal amount
+ * of buffer space for the socket.
+ */
+int
+raw_attach(so, proto)
+ register struct socket *so;
+ int proto;
+{
+ register struct rawcb *rp = sotorawcb(so);
+ int error;
+
+ /*
+ * It is assumed that raw_attach is called
+ * after space has been allocated for the
+ * rawcb.
+ */
+ if (rp == 0)
+ return (ENOBUFS);
+ error = soreserve(so, raw_sendspace, raw_recvspace);
+ if (error)
+ return (error);
+ rp->rcb_socket = so;
+ rp->rcb_proto.sp_family = so->so_proto->pr_domain->dom_family;
+ rp->rcb_proto.sp_protocol = proto;
+ LIST_INSERT_HEAD(&rawcb_list, rp, list);
+ return (0);
+}
+
+/*
+ * Detach the raw connection block and discard
+ * socket resources.
+ */
+void
+raw_detach(rp)
+ register struct rawcb *rp;
+{
+ struct socket *so = rp->rcb_socket;
+
+ so->so_pcb = 0;
+ sofree(so);
+ LIST_REMOVE(rp, list);
+#ifdef notdef
+ if (rp->rcb_laddr)
+ m_freem(dtom(rp->rcb_laddr));
+ rp->rcb_laddr = 0;
+#endif
+ free((caddr_t)(rp), M_PCB);
+}
+
+/*
+ * Disconnect and possibly release resources.
+ */
+void
+raw_disconnect(rp)
+ struct rawcb *rp;
+{
+
+#ifdef notdef
+ if (rp->rcb_faddr)
+ m_freem(dtom(rp->rcb_faddr));
+ rp->rcb_faddr = 0;
+#endif
+ if (rp->rcb_socket->so_state & SS_NOFDREF)
+ raw_detach(rp);
+}
+
+#ifdef notdef
+#include <sys/mbuf.h>
+
+int
+raw_bind(so, nam)
+ register struct socket *so;
+ struct mbuf *nam;
+{
+ struct sockaddr *addr = mtod(nam, struct sockaddr *);
+ register struct rawcb *rp;
+
+ if (ifnet == 0)
+ return (EADDRNOTAVAIL);
+ rp = sotorawcb(so);
+ nam = m_copym(nam, 0, M_COPYALL, M_WAITOK);
+ rp->rcb_laddr = mtod(nam, struct sockaddr *);
+ return (0);
+}
+#endif
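[Note] raw_attach() and raw_detach() above do little more than reserve socket buffer space and link or unlink the rawcb on rawcb_list with the <sys/queue.h> LIST macros. A minimal userland sketch of that list lifecycle follows, using the same macros but illustrative names (struct pcb, pcb_attach, pcb_detach), not kernel interfaces.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct pcb {
    LIST_ENTRY(pcb) list;           /* linkage, like rawcb's "list" field */
    int proto;
};

static LIST_HEAD(, pcb) pcb_list = LIST_HEAD_INITIALIZER(pcb_list);

static struct pcb *
pcb_attach(int proto)
{
    struct pcb *p = calloc(1, sizeof(*p));

    if (p == NULL)
        return (NULL);
    p->proto = proto;
    LIST_INSERT_HEAD(&pcb_list, p, list);   /* new blocks go on the front */
    return (p);
}

static void
pcb_detach(struct pcb *p)
{
    LIST_REMOVE(p, list);
    free(p);
}

int
main(void)
{
    struct pcb *a = pcb_attach(1), *b = pcb_attach(2), *p;

    LIST_FOREACH(p, &pcb_list, list)        /* same walk raw_input() uses */
        printf("proto %d\n", p->proto);
    pcb_detach(a);
    pcb_detach(b);
    return (0);
}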
diff --git a/sys/net/raw_cb.h b/sys/net/raw_cb.h
new file mode 100644
index 0000000..5184de6
--- /dev/null
+++ b/sys/net/raw_cb.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_cb.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_RAW_CB_H_
+#define _NET_RAW_CB_H_
+
+#include <sys/queue.h>
+
+/*
+ * Raw protocol interface control block. Used
+ * to tie a socket to the generic raw interface.
+ */
+struct rawcb {
+ LIST_ENTRY(rawcb) list;
+ struct socket *rcb_socket; /* back pointer to socket */
+ struct sockaddr *rcb_faddr; /* destination address */
+ struct sockaddr *rcb_laddr; /* socket's address */
+ struct sockproto rcb_proto; /* protocol family, protocol */
+};
+
+#define sotorawcb(so) ((struct rawcb *)(so)->so_pcb)
+
+/*
+ * Nominal space allocated to a raw socket.
+ */
+#define RAWSNDQ 8192
+#define RAWRCVQ 8192
+
+#ifdef KERNEL
+extern LIST_HEAD(rawcb_list_head, rawcb) rawcb_list;
+
+int raw_attach __P((struct socket *, int));
+void raw_ctlinput __P((int, struct sockaddr *, void *));
+void raw_detach __P((struct rawcb *));
+void raw_disconnect __P((struct rawcb *));
+void raw_init __P((void));
+void raw_input __P((struct mbuf *,
+ struct sockproto *, struct sockaddr *, struct sockaddr *));
+
+extern struct pr_usrreqs raw_usrreqs;
+#endif
+
+#endif
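[Note] The rawcb and its socket point at each other: so_pcb holds the rawcb, rcb_socket points back, and sotorawcb() is just the cast that recovers the pcb from the socket. A self-contained sketch of that cross-linking pattern; struct sock, struct mypcb and sotomypcb() are hypothetical stand-ins, not the kernel structures.

#include <stdio.h>
#include <stdlib.h>

struct sock { void *so_pcb; };              /* stand-in for struct socket */
struct mypcb {
    struct sock *pcb_socket;                /* back pointer, like rcb_socket */
    int pcb_proto;
};
#define sotomypcb(so) ((struct mypcb *)(so)->so_pcb)

int
main(void)
{
    struct sock so;
    struct mypcb *pcb = calloc(1, sizeof(*pcb));

    so.so_pcb = pcb;                        /* socket points at its pcb */
    pcb->pcb_socket = &so;                  /* pcb points back at the socket */
    pcb->pcb_proto = 17;
    printf("proto via macro: %d\n", sotomypcb(&so)->pcb_proto);
    free(pcb);
    return (0);
}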
diff --git a/sys/net/raw_usrreq.c b/sys/net/raw_usrreq.c
new file mode 100644
index 0000000..07b316d
--- /dev/null
+++ b/sys/net/raw_usrreq.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_usrreq.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+
+#include <net/raw_cb.h>
+
+/*
+ * Initialize the raw connection block list.
+ */
+void
+raw_init()
+{
+ LIST_INIT(&rawcb_list);
+}
+
+
+/*
+ * Raw protocol input routine. Find the socket
+ * associated with the packet(s) and move them over. If
+ * nothing exists for this packet, drop it.
+ */
+void
+raw_input(m0, proto, src, dst)
+ struct mbuf *m0;
+ register struct sockproto *proto;
+ struct sockaddr *src, *dst;
+{
+ register struct rawcb *rp;
+ register struct mbuf *m = m0;
+ register int sockets = 0;
+ struct socket *last;
+
+ last = 0;
+ LIST_FOREACH(rp, &rawcb_list, list) {
+ if (rp->rcb_proto.sp_family != proto->sp_family)
+ continue;
+ if (rp->rcb_proto.sp_protocol &&
+ rp->rcb_proto.sp_protocol != proto->sp_protocol)
+ continue;
+ /*
+ * We assume the lower level routines have
+ * placed the address in a canonical format
+ * suitable for a structure comparison.
+ *
+ * Note that if the lengths are not the same
+ * the comparison will fail at the first byte.
+ */
+#define equal(a1, a2) \
+ (bcmp((caddr_t)(a1), (caddr_t)(a2), a1->sa_len) == 0)
+ if (rp->rcb_laddr && !equal(rp->rcb_laddr, dst))
+ continue;
+ if (rp->rcb_faddr && !equal(rp->rcb_faddr, src))
+ continue;
+ if (last) {
+ struct mbuf *n;
+ n = m_copy(m, 0, (int)M_COPYALL);
+ if (n) {
+ if (sbappendaddr(&last->so_rcv, src,
+ n, (struct mbuf *)0) == 0)
+ /* should notify about lost packet */
+ m_freem(n);
+ else {
+ sorwakeup(last);
+ sockets++;
+ }
+ }
+ }
+ last = rp->rcb_socket;
+ }
+ if (last) {
+ if (sbappendaddr(&last->so_rcv, src,
+ m, (struct mbuf *)0) == 0)
+ m_freem(m);
+ else {
+ sorwakeup(last);
+ sockets++;
+ }
+ } else
+ m_freem(m);
+}
+
+/*ARGSUSED*/
+void
+raw_ctlinput(cmd, arg, dummy)
+ int cmd;
+ struct sockaddr *arg;
+ void *dummy;
+{
+
+ if (cmd < 0 || cmd > PRC_NCMDS)
+ return;
+ /* INCOMPLETE */
+}
+
+static int
+raw_uabort(struct socket *so)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ if (rp == 0)
+ return EINVAL;
+ raw_disconnect(rp);
+ sofree(so);
+ soisdisconnected(so);
+ return 0;
+}
+
+/* pru_accept is EOPNOTSUPP */
+
+static int
+raw_uattach(struct socket *so, int proto, struct proc *p)
+{
+ struct rawcb *rp = sotorawcb(so);
+ int error;
+
+ if (rp == 0)
+ return EINVAL;
+ if (p && (error = suser(p)) != 0)
+ return error;
+ return raw_attach(so, proto);
+}
+
+static int
+raw_ubind(struct socket *so, struct sockaddr *nam, struct proc *p)
+{
+ return EINVAL;
+}
+
+static int
+raw_uconnect(struct socket *so, struct sockaddr *nam, struct proc *p)
+{
+ return EINVAL;
+}
+
+/* pru_connect2 is EOPNOTSUPP */
+/* pru_control is EOPNOTSUPP */
+
+static int
+raw_udetach(struct socket *so)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ if (rp == 0)
+ return EINVAL;
+
+ raw_detach(rp);
+ return 0;
+}
+
+static int
+raw_udisconnect(struct socket *so)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ if (rp == 0)
+ return EINVAL;
+ if (rp->rcb_faddr == 0) {
+ return ENOTCONN;
+ }
+ raw_disconnect(rp);
+ soisdisconnected(so);
+ return 0;
+}
+
+/* pru_listen is EOPNOTSUPP */
+
+static int
+raw_upeeraddr(struct socket *so, struct sockaddr **nam)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ if (rp == 0)
+ return EINVAL;
+ if (rp->rcb_faddr == 0) {
+ return ENOTCONN;
+ }
+ *nam = dup_sockaddr(rp->rcb_faddr, 1);
+ return 0;
+}
+
+/* pru_rcvd is EOPNOTSUPP */
+/* pru_rcvoob is EOPNOTSUPP */
+
+static int
+raw_usend(struct socket *so, int flags, struct mbuf *m,
+ struct sockaddr *nam, struct mbuf *control, struct proc *p)
+{
+ int error;
+ struct rawcb *rp = sotorawcb(so);
+
+ if (rp == 0) {
+ error = EINVAL;
+ goto release;
+ }
+
+ if (flags & PRUS_OOB) {
+ error = EOPNOTSUPP;
+ goto release;
+ }
+
+ if (control && control->m_len) {
+ error = EOPNOTSUPP;
+ goto release;
+ }
+ if (nam) {
+ if (rp->rcb_faddr) {
+ error = EISCONN;
+ goto release;
+ }
+ rp->rcb_faddr = nam;
+ } else if (rp->rcb_faddr == 0) {
+ error = ENOTCONN;
+ goto release;
+ }
+ error = (*so->so_proto->pr_output)(m, so);
+ m = NULL;
+ if (nam)
+ rp->rcb_faddr = 0;
+release:
+ if (m != NULL)
+ m_freem(m);
+ return (error);
+}
+
+/* pru_sense is null */
+
+static int
+raw_ushutdown(struct socket *so)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ if (rp == 0)
+ return EINVAL;
+ socantsendmore(so);
+ return 0;
+}
+
+static int
+raw_usockaddr(struct socket *so, struct sockaddr **nam)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ if (rp == 0)
+ return EINVAL;
+ if (rp->rcb_laddr == 0)
+ return EINVAL;
+ *nam = dup_sockaddr(rp->rcb_laddr, 1);
+ return 0;
+}
+
+struct pr_usrreqs raw_usrreqs = {
+ raw_uabort, pru_accept_notsupp, raw_uattach, raw_ubind, raw_uconnect,
+ pru_connect2_notsupp, pru_control_notsupp, raw_udetach,
+ raw_udisconnect, pru_listen_notsupp, raw_upeeraddr, pru_rcvd_notsupp,
+ pru_rcvoob_notsupp, raw_usend, pru_sense_null, raw_ushutdown,
+ raw_usockaddr, sosend, soreceive, sopoll
+};
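[Note] raw_input() above delivers to every matching control block but only copies the mbuf chain for matches before the last one; the last match is handed the original, and if nothing matches the chain is freed. A userland sketch of that copy-all-but-last delivery pattern, using heap strings in place of mbufs and illustrative names (struct consumer, deliver) throughout.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct consumer { int proto; const char *name; };

static void
deliver(const struct consumer *c, char *msg)
{
    printf("%s got \"%s\"\n", c->name, msg);
    free(msg);
}

int
main(void)
{
    struct consumer tab[] = {
        { 1, "a" }, { 2, "b" }, { 1, "c" }, { 1, "d" },
    };
    char *msg = strdup("hello");
    const struct consumer *last = NULL;
    int want = 1;
    size_t i;

    for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
        if (tab[i].proto != want)
            continue;
        if (last != NULL)
            deliver(last, strdup(msg));     /* earlier matches get copies */
        last = &tab[i];
    }
    if (last != NULL)
        deliver(last, msg);                 /* the last match gets the original */
    else
        free(msg);                          /* nobody matched: drop it */
    return (0);
}

This way exactly one allocation is made per extra consumer and the message is never leaked, which is the point of the two-stage loop in raw_input().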
diff --git a/sys/net/route.c b/sys/net/route.c
new file mode 100644
index 0000000..45e0e39
--- /dev/null
+++ b/sys/net/route.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (c) 1980, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)route.c 8.2 (Berkeley) 11/15/93
+ * $FreeBSD$
+ */
+
+#include "opt_inet.h"
+#include "opt_mrouting.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/domain.h>
+#include <sys/kernel.h>
+
+#include <net/if.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+#include <netinet/ip_mroute.h>
+
+#define SA(p) ((struct sockaddr *)(p))
+
+struct route_cb route_cb;
+static struct rtstat rtstat;
+struct radix_node_head *rt_tables[AF_MAX+1];
+
+static int rttrash; /* routes not in table but not freed */
+
+static void rt_maskedcopy __P((struct sockaddr *,
+ struct sockaddr *, struct sockaddr *));
+static void rtable_init __P((void **));
+
+static void
+rtable_init(table)
+ void **table;
+{
+ struct domain *dom;
+ for (dom = domains; dom; dom = dom->dom_next)
+ if (dom->dom_rtattach)
+ dom->dom_rtattach(&table[dom->dom_family],
+ dom->dom_rtoffset);
+}
+
+void
+route_init()
+{
+ rn_init(); /* initialize all zeroes, all ones, mask table */
+ rtable_init((void **)rt_tables);
+}
+
+/*
+ * Packet routing routines.
+ */
+void
+rtalloc(ro)
+ register struct route *ro;
+{
+ rtalloc_ign(ro, 0UL);
+}
+
+void
+rtalloc_ign(ro, ignore)
+ register struct route *ro;
+ u_long ignore;
+{
+ struct rtentry *rt;
+ int s;
+
+ if ((rt = ro->ro_rt) != NULL) {
+ if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
+ return;
+ /* XXX - We are probably always at splnet here already. */
+ s = splnet();
+ RTFREE(rt);
+ splx(s);
+ }
+ ro->ro_rt = rtalloc1(&ro->ro_dst, 1, ignore);
+}
+
+/*
+ * Look up the route that matches the given address,
+ * or at least try to. Create a cloned route if needed.
+ */
+struct rtentry *
+rtalloc1(dst, report, ignflags)
+ register struct sockaddr *dst;
+ int report;
+ u_long ignflags;
+{
+ register struct radix_node_head *rnh = rt_tables[dst->sa_family];
+ register struct rtentry *rt;
+ register struct radix_node *rn;
+ struct rtentry *newrt = 0;
+ struct rt_addrinfo info;
+ u_long nflags;
+ int s = splnet(), err = 0, msgtype = RTM_MISS;
+
+ /*
+ * Look up the address in the table for that Address Family
+ */
+ if (rnh && (rn = rnh->rnh_matchaddr((caddr_t)dst, rnh)) &&
+ ((rn->rn_flags & RNF_ROOT) == 0)) {
+ /*
+ * If we find it and it's not the root node, then
+ * get a reference on the associated rtentry.
+ */
+ newrt = rt = (struct rtentry *)rn;
+ nflags = rt->rt_flags & ~ignflags;
+ if (report && (nflags & (RTF_CLONING | RTF_PRCLONING))) {
+ /*
+ * We are apparently adding (report = 0 in delete).
+ * If it requires that it be cloned, do so.
+ * (This implies it wasn't a HOST route.)
+ */
+ err = rtrequest(RTM_RESOLVE, dst, SA(0),
+ SA(0), 0, &newrt);
+ if (err) {
+ /*
+ * If the cloning didn't succeed, maybe
+ * what we have will do. Return that.
+ */
+ newrt = rt;
+ rt->rt_refcnt++;
+ goto miss;
+ }
+ if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
+ /*
+ * If the new route specifies it be
+ * externally resolved, then go do that.
+ */
+ msgtype = RTM_RESOLVE;
+ goto miss;
+ }
+ } else
+ rt->rt_refcnt++;
+ } else {
+ /*
+ * Either we hit the root or couldn't find any match,
+ * which basically means
+ * "can't get there from here".
+ */
+ rtstat.rts_unreach++;
+ miss: if (report) {
+ /*
+ * If required, report the failure to the supervising
+ * Authorities.
+ * For a delete, this is not an error. (report == 0)
+ */
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_DST] = dst;
+ rt_missmsg(msgtype, &info, 0, err);
+ }
+ }
+ splx(s);
+ return (newrt);
+}
+
+/*
+ * Remove a reference from an rtentry.
+ * If the count drops low enough, take it out of the routing table.
+ */
+void
+rtfree(rt)
+ register struct rtentry *rt;
+{
+ /*
+ * find the tree for that address family
+ */
+ register struct radix_node_head *rnh =
+ rt_tables[rt_key(rt)->sa_family];
+ register struct ifaddr *ifa;
+
+ if (rt == 0 || rnh == 0)
+ panic("rtfree");
+
+ /*
+ * Decrement the reference count by one; if it reaches 0
+ * and there is a close function defined, call it.
+ */
+ rt->rt_refcnt--;
+ if(rnh->rnh_close && rt->rt_refcnt == 0) {
+ rnh->rnh_close((struct radix_node *)rt, rnh);
+ }
+
+ /*
+ * If we are no longer "up" (and ref == 0)
+ * then we can free the resources associated
+ * with the route.
+ */
+ if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
+ if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
+ panic ("rtfree 2");
+ /*
+ * the rtentry must have been removed from the routing table
+ * so it is represented in rttrash.. remove that now.
+ */
+ rttrash--;
+
+#ifdef DIAGNOSTIC
+ if (rt->rt_refcnt < 0) {
+ printf("rtfree: %p not freed (neg refs)\n", rt);
+ return;
+ }
+#endif
+
+ /*
+ * release the references we hold on other items,
+ * e.g. other routes and ifaddrs.
+ */
+ if((ifa = rt->rt_ifa))
+ IFAFREE(ifa);
+ if (rt->rt_parent) {
+ RTFREE(rt->rt_parent);
+ }
+
+ /*
+ * The key is separately alloc'd so free it (see rt_setgate()).
+ * This also frees the gateway, as they are always malloc'd
+ * together.
+ */
+ Free(rt_key(rt));
+
+ /*
+ * and the rtentry itself of course
+ */
+ Free(rt);
+ }
+}
+
+void
+ifafree(ifa)
+ register struct ifaddr *ifa;
+{
+ if (ifa == NULL)
+ panic("ifafree");
+ if (ifa->ifa_refcnt == 0)
+ free(ifa, M_IFADDR);
+ else
+ ifa->ifa_refcnt--;
+}
+
+/*
+ * Force a routing table entry to the specified
+ * destination to go through the given gateway.
+ * Normally called as a result of a routing redirect
+ * message from the network layer.
+ *
+ * N.B.: must be called at splnet
+ *
+ */
+void
+rtredirect(dst, gateway, netmask, flags, src, rtp)
+ struct sockaddr *dst, *gateway, *netmask, *src;
+ int flags;
+ struct rtentry **rtp;
+{
+ register struct rtentry *rt;
+ int error = 0;
+ short *stat = 0;
+ struct rt_addrinfo info;
+ struct ifaddr *ifa;
+
+ /* verify the gateway is directly reachable */
+ if ((ifa = ifa_ifwithnet(gateway)) == 0) {
+ error = ENETUNREACH;
+ goto out;
+ }
+ rt = rtalloc1(dst, 0, 0UL);
+ /*
+ * If the redirect isn't from our current router for this dst,
+ * it's either old or wrong. If it redirects us to ourselves,
+ * we have a routing loop, perhaps as a result of an interface
+ * going down recently.
+ */
+#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
+ if (!(flags & RTF_DONE) && rt &&
+ (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
+ error = EINVAL;
+ else if (ifa_ifwithaddr(gateway))
+ error = EHOSTUNREACH;
+ if (error)
+ goto done;
+ /*
+ * Create a new entry if we just got back a wildcard entry
+ * or the lookup failed. This is necessary for hosts
+ * which use routing redirects generated by smart gateways
+ * to dynamically build the routing tables.
+ */
+ if ((rt == 0) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
+ goto create;
+ /*
+ * Don't listen to the redirect if it's
+ * for a route to an interface.
+ */
+ if (rt->rt_flags & RTF_GATEWAY) {
+ if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
+ /*
+ * Changing from route to net => route to host.
+ * Create new route, rather than smashing route to net.
+ */
+ create:
+ flags |= RTF_GATEWAY | RTF_DYNAMIC;
+ error = rtrequest((int)RTM_ADD, dst, gateway,
+ netmask, flags,
+ (struct rtentry **)0);
+ stat = &rtstat.rts_dynamic;
+ } else {
+ /*
+ * Smash the current notion of the gateway to
+ * this destination. Should check about netmask!!!
+ */
+ rt->rt_flags |= RTF_MODIFIED;
+ flags |= RTF_MODIFIED;
+ stat = &rtstat.rts_newgateway;
+ /*
+ * add the key and gateway (in one malloc'd chunk).
+ */
+ rt_setgate(rt, rt_key(rt), gateway);
+ }
+ } else
+ error = EHOSTUNREACH;
+done:
+ if (rt) {
+ if (rtp && !error)
+ *rtp = rt;
+ else
+ rtfree(rt);
+ }
+out:
+ if (error)
+ rtstat.rts_badredirect++;
+ else if (stat != NULL)
+ (*stat)++;
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_DST] = dst;
+ info.rti_info[RTAX_GATEWAY] = gateway;
+ info.rti_info[RTAX_NETMASK] = netmask;
+ info.rti_info[RTAX_AUTHOR] = src;
+ rt_missmsg(RTM_REDIRECT, &info, flags, error);
+}
+
+/*
+ * Routing table ioctl interface.
+ */
+int
+rtioctl(req, data, p)
+ int req;
+ caddr_t data;
+ struct proc *p;
+{
+#ifdef INET
+ /* Multicast goop, grrr... */
+#ifdef MROUTING
+ return mrt_ioctl(req, data);
+#else
+ return mrt_ioctl(req, data, p);
+#endif
+#else /* INET */
+ return ENXIO;
+#endif /* INET */
+}
+
+struct ifaddr *
+ifa_ifwithroute(flags, dst, gateway)
+ int flags;
+ struct sockaddr *dst, *gateway;
+{
+ register struct ifaddr *ifa;
+ if ((flags & RTF_GATEWAY) == 0) {
+ /*
+ * If we are adding a route to an interface,
+ * and the interface is a pt to pt link
+ * we should search for the destination
+ * as our clue to the interface. Otherwise
+ * we can use the local address.
+ */
+ ifa = 0;
+ if (flags & RTF_HOST) {
+ ifa = ifa_ifwithdstaddr(dst);
+ }
+ if (ifa == 0)
+ ifa = ifa_ifwithaddr(gateway);
+ } else {
+ /*
+ * If we are adding a route to a remote net
+ * or host, the gateway may still be on the
+ * other end of a pt to pt link.
+ */
+ ifa = ifa_ifwithdstaddr(gateway);
+ }
+ if (ifa == 0)
+ ifa = ifa_ifwithnet(gateway);
+ if (ifa == 0) {
+ struct rtentry *rt = rtalloc1(dst, 0, 0UL);
+ if (rt == 0)
+ return (0);
+ rt->rt_refcnt--;
+ if ((ifa = rt->rt_ifa) == 0)
+ return (0);
+ }
+ if (ifa->ifa_addr->sa_family != dst->sa_family) {
+ struct ifaddr *oifa = ifa;
+ ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
+ if (ifa == 0)
+ ifa = oifa;
+ }
+ return (ifa);
+}
+
+#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
+
+static int rt_fixdelete __P((struct radix_node *, void *));
+static int rt_fixchange __P((struct radix_node *, void *));
+
+struct rtfc_arg {
+ struct rtentry *rt0;
+ struct radix_node_head *rnh;
+};
+
+/*
+ * Do appropriate manipulations of a routing tree given
+ * all the bits of info needed
+ */
+int
+rtrequest(req, dst, gateway, netmask, flags, ret_nrt)
+ int req, flags;
+ struct sockaddr *dst, *gateway, *netmask;
+ struct rtentry **ret_nrt;
+{
+ int s = splnet(); int error = 0;
+ register struct rtentry *rt;
+ register struct radix_node *rn;
+ register struct radix_node_head *rnh;
+ struct ifaddr *ifa;
+ struct sockaddr *ndst;
+#define senderr(x) { error = x ; goto bad; }
+
+ /*
+ * Find the correct routing tree to use for this Address Family
+ */
+ if ((rnh = rt_tables[dst->sa_family]) == 0)
+ senderr(ESRCH);
+ /*
+ * If we are adding a host route then we don't want to put
+ * a netmask in the tree
+ */
+ if (flags & RTF_HOST)
+ netmask = 0;
+ switch (req) {
+ case RTM_DELETE:
+ /*
+ * Remove the item from the tree and return it.
+ * Complain if it is not there and do no more processing.
+ */
+ if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == 0)
+ senderr(ESRCH);
+ if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
+ panic ("rtrequest delete");
+ rt = (struct rtentry *)rn;
+
+ /*
+ * Now search what's left of the subtree for any cloned
+ * routes which might have been formed from this node.
+ */
+ if ((rt->rt_flags & RTF_PRCLONING) && netmask) {
+ rnh->rnh_walktree_from(rnh, dst, netmask,
+ rt_fixdelete, rt);
+ }
+
+ /*
+ * Remove any external references we may have.
+ * This might result in another rtentry being freed if
+ * we held its last reference.
+ */
+ if (rt->rt_gwroute) {
+ rt = rt->rt_gwroute;
+ RTFREE(rt);
+ (rt = (struct rtentry *)rn)->rt_gwroute = 0;
+ }
+
+ /*
+ * NB: RTF_UP must be set during the search above,
+ * because we might delete the last ref, causing
+ * rt to get freed prematurely.
+ * eh? then why not just add a reference?
+ * I'm not sure how RTF_UP helps matters. (JRE)
+ */
+ rt->rt_flags &= ~RTF_UP;
+
+ /*
+ * give the protocol a chance to keep things in sync.
+ */
+ if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
+ ifa->ifa_rtrequest(RTM_DELETE, rt, SA(0));
+
+ /*
+ * one more rtentry floating around that is not
+ * linked to the routing table.
+ */
+ rttrash++;
+
+ /*
+ * If the caller wants it, then it can have it,
+ * but it's up to it to free the rtentry as we won't be
+ * doing it.
+ */
+ if (ret_nrt)
+ *ret_nrt = rt;
+ else if (rt->rt_refcnt <= 0) {
+ rt->rt_refcnt++; /* make a 1->0 transition */
+ rtfree(rt);
+ }
+ break;
+
+ case RTM_RESOLVE:
+ if (ret_nrt == 0 || (rt = *ret_nrt) == 0)
+ senderr(EINVAL);
+ ifa = rt->rt_ifa;
+ flags = rt->rt_flags &
+ ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
+ flags |= RTF_WASCLONED;
+ gateway = rt->rt_gateway;
+ if ((netmask = rt->rt_genmask) == 0)
+ flags |= RTF_HOST;
+ goto makeroute;
+
+ case RTM_ADD:
+ if ((flags & RTF_GATEWAY) && !gateway)
+ panic("rtrequest: GATEWAY but no gateway");
+
+ if ((ifa = ifa_ifwithroute(flags, dst, gateway)) == 0)
+ senderr(ENETUNREACH);
+
+ makeroute:
+ R_Malloc(rt, struct rtentry *, sizeof(*rt));
+ if (rt == 0)
+ senderr(ENOBUFS);
+ Bzero(rt, sizeof(*rt));
+ rt->rt_flags = RTF_UP | flags;
+ /*
+ * Add the gateway, possibly re-malloc'ing the storage for it;
+ * also add the rt_gwroute if possible.
+ */
+ if ((error = rt_setgate(rt, dst, gateway)) != 0) {
+ Free(rt);
+ senderr(error);
+ }
+
+ /*
+ * point to the (possibly newly malloc'd) dest address.
+ */
+ ndst = rt_key(rt);
+
+ /*
+ * make sure it contains the value we want (masked if needed).
+ */
+ if (netmask) {
+ rt_maskedcopy(dst, ndst, netmask);
+ } else
+ Bcopy(dst, ndst, dst->sa_len);
+
+ /*
+ * Note that we now have a reference to the ifa.
+ * This moved from below so that rnh->rnh_addaddr() can
+ * examine the ifa and ifa->ifa_ifp if it so desires.
+ */
+ ifa->ifa_refcnt++;
+ rt->rt_ifa = ifa;
+ rt->rt_ifp = ifa->ifa_ifp;
+ /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
+
+ rn = rnh->rnh_addaddr((caddr_t)ndst, (caddr_t)netmask,
+ rnh, rt->rt_nodes);
+ if (rn == 0) {
+ struct rtentry *rt2;
+ /*
+ * Uh-oh, we already have one of these in the tree.
+ * We do a special hack: if the route that's already
+ * there was generated by the protocol-cloning
+ * mechanism, then we just blow it away and retry
+ * the insertion of the new one.
+ */
+ rt2 = rtalloc1(dst, 0, RTF_PRCLONING);
+ if (rt2 && rt2->rt_parent) {
+ rtrequest(RTM_DELETE,
+ (struct sockaddr *)rt_key(rt2),
+ rt2->rt_gateway,
+ rt_mask(rt2), rt2->rt_flags, 0);
+ RTFREE(rt2);
+ rn = rnh->rnh_addaddr((caddr_t)ndst,
+ (caddr_t)netmask,
+ rnh, rt->rt_nodes);
+ } else if (rt2) {
+ /* undo the extra ref we got */
+ RTFREE(rt2);
+ }
+ }
+
+ /*
+ * If it still failed to go into the tree,
+ * then un-make it (this should be a function)
+ */
+ if (rn == 0) {
+ if (rt->rt_gwroute)
+ rtfree(rt->rt_gwroute);
+ if (rt->rt_ifa) {
+ IFAFREE(rt->rt_ifa);
+ }
+ Free(rt_key(rt));
+ Free(rt);
+ senderr(EEXIST);
+ }
+
+ rt->rt_parent = 0;
+
+ /*
+ * If we got here from RESOLVE, then we are cloning
+ * so clone the rest, and note that we
+ * are a clone (and increment the parent's references)
+ */
+ if (req == RTM_RESOLVE) {
+ rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
+ if ((*ret_nrt)->rt_flags & RTF_PRCLONING) {
+ rt->rt_parent = (*ret_nrt);
+ (*ret_nrt)->rt_refcnt++;
+ }
+ }
+
+ /*
+ * if this protocol has something to add to this then
+ * allow it to do that as well.
+ */
+ if (ifa->ifa_rtrequest)
+ ifa->ifa_rtrequest(req, rt, SA(ret_nrt ? *ret_nrt : 0));
+
+ /*
+ * We repeat the same procedure from rt_setgate() here because
+ * it doesn't fire when we call it there: the node
+ * hasn't been added to the tree yet.
+ */
+ if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
+ struct rtfc_arg arg;
+ arg.rnh = rnh;
+ arg.rt0 = rt;
+ rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
+ rt_fixchange, &arg);
+ }
+
+ /*
+ * actually return a resultant rtentry and
+ * give the caller a single reference.
+ */
+ if (ret_nrt) {
+ *ret_nrt = rt;
+ rt->rt_refcnt++;
+ }
+ break;
+ }
+bad:
+ splx(s);
+ return (error);
+}
+
+/*
+ * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
+ * (i.e., the routes related to it by the operation of cloning). This
+ * routine is iterated over all potential former-child-routes by way of
+ * rnh->rnh_walktree_from() above, and those that actually are children of
+ * the late parent (passed in as VP here) are themselves deleted.
+ */
+static int
+rt_fixdelete(rn, vp)
+ struct radix_node *rn;
+ void *vp;
+{
+ struct rtentry *rt = (struct rtentry *)rn;
+ struct rtentry *rt0 = vp;
+
+ if (rt->rt_parent == rt0 && !(rt->rt_flags & RTF_PINNED)) {
+ return rtrequest(RTM_DELETE, rt_key(rt),
+ (struct sockaddr *)0, rt_mask(rt),
+ rt->rt_flags, (struct rtentry **)0);
+ }
+ return 0;
+}
+
+/*
+ * This routine is called from rt_setgate() to do the analogous thing for
+ * adds and changes. There is the added complication in this case of a
+ * middle insert; i.e., insertion of a new network route between an older
+ * network route and (cloned) host routes. For this reason, a simple check
+ * of rt->rt_parent is insufficient; each candidate route must be tested
+ * against the (mask, value) of the new route (passed as before in vp)
+ * to see if the new route matches it. Unfortunately, this has the obnoxious
+ * property of also triggering for insertion /above/ a pre-existing network
+ * route and clones. Sigh. This may be fixed some day.
+ *
+ * XXX - it may be possible to do fixdelete() for changes and reserve this
+ * routine just for adds. I'm not sure why I thought it was necessary to do
+ * changes this way.
+ */
+#ifdef DEBUG
+static int rtfcdebug = 0;
+#endif
+
+static int
+rt_fixchange(rn, vp)
+ struct radix_node *rn;
+ void *vp;
+{
+ struct rtentry *rt = (struct rtentry *)rn;
+ struct rtfc_arg *ap = vp;
+ struct rtentry *rt0 = ap->rt0;
+ struct radix_node_head *rnh = ap->rnh;
+ u_char *xk1, *xm1, *xk2;
+ int i, len;
+
+#ifdef DEBUG
+ if (rtfcdebug)
+ printf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
+#endif
+
+ if (!rt->rt_parent || (rt->rt_flags & RTF_PINNED)) {
+#ifdef DEBUG
+ if(rtfcdebug) printf("no parent or pinned\n");
+#endif
+ return 0;
+ }
+
+ if (rt->rt_parent == rt0) {
+#ifdef DEBUG
+ if(rtfcdebug) printf("parent match\n");
+#endif
+ return rtrequest(RTM_DELETE, rt_key(rt),
+ (struct sockaddr *)0, rt_mask(rt),
+ rt->rt_flags, (struct rtentry **)0);
+ }
+
+ /*
+ * There probably is a function somewhere which does this...
+ * if not, there should be.
+ */
+ len = imin(((struct sockaddr *)rt_key(rt0))->sa_len,
+ ((struct sockaddr *)rt_key(rt))->sa_len);
+
+ xk1 = (u_char *)rt_key(rt0);
+ xm1 = (u_char *)rt_mask(rt0);
+ xk2 = (u_char *)rt_key(rt);
+
+ for (i = rnh->rnh_treetop->rn_off; i < len; i++) {
+ if ((xk2[i] & xm1[i]) != xk1[i]) {
+#ifdef DEBUG
+ if(rtfcdebug) printf("no match\n");
+#endif
+ return 0;
+ }
+ }
+
+ /*
+ * OK, this node is a clone, and matches the node currently being
+ * changed/added under the node's mask. So, get rid of it.
+ */
+#ifdef DEBUG
+ if(rtfcdebug) printf("deleting\n");
+#endif
+ return rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0,
+ rt_mask(rt), rt->rt_flags, (struct rtentry **)0);
+}
+
+int
+rt_setgate(rt0, dst, gate)
+ struct rtentry *rt0;
+ struct sockaddr *dst, *gate;
+{
+ caddr_t new, old;
+ int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
+ register struct rtentry *rt = rt0;
+ struct radix_node_head *rnh = rt_tables[dst->sa_family];
+
+ /*
+ * A host route with the destination equal to the gateway
+ * will interfere with keeping LLINFO in the routing
+ * table, so disallow it.
+ */
+ if (((rt0->rt_flags & (RTF_HOST|RTF_GATEWAY|RTF_LLINFO)) ==
+ (RTF_HOST|RTF_GATEWAY)) &&
+ (dst->sa_len == gate->sa_len) &&
+ (bcmp(dst, gate, dst->sa_len) == 0)) {
+ /*
+ * The route might already exist if this is an RTM_CHANGE
+ * or a routing redirect, so try to delete it.
+ */
+ if (rt_key(rt0))
+ rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt0),
+ rt0->rt_gateway, rt_mask(rt0), rt0->rt_flags, 0);
+ return EADDRNOTAVAIL;
+ }
+
+ /*
+ * Both dst and gateway are stored in the same malloc'd chunk
+ * (If I ever get my hands on....)
+ * If we need to malloc a new chunk, keep the old one around
+ * until we don't need it any more.
+ */
+ if (rt->rt_gateway == 0 || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
+ old = (caddr_t)rt_key(rt);
+ R_Malloc(new, caddr_t, dlen + glen);
+ if (new == 0)
+ return ENOBUFS;
+ rt->rt_nodes->rn_key = new;
+ } else {
+ /*
+ * otherwise just overwrite the old one
+ */
+ new = rt->rt_nodes->rn_key;
+ old = 0;
+ }
+
+ /*
+ * copy the new gateway value into the memory chunk
+ */
+ Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen);
+
+ /*
+ * if we are replacing the chunk (or it's new) we need to
+ * replace the dst as well
+ */
+ if (old) {
+ Bcopy(dst, new, dlen);
+ Free(old);
+ }
+
+ /*
+ * If there is already a gwroute, it's now almost definitely wrong
+ * so drop it.
+ */
+ if (rt->rt_gwroute) {
+ rt = rt->rt_gwroute; RTFREE(rt);
+ rt = rt0; rt->rt_gwroute = 0;
+ }
+ /*
+ * Cloning loop avoidance:
+ * In the presence of protocol-cloning and bad configuration,
+ * it is possible to get stuck in bottomless mutual recursion
+ * (rtrequest rt_setgate rtalloc1). We avoid this by not allowing
+ * protocol-cloning to operate for gateways (which is probably the
+ * correct choice anyway), and avoid the resulting reference loops
+ * by disallowing any route to run through itself as a gateway.
+ * This is obviously mandatory when we get rt->rt_output().
+ */
+ if (rt->rt_flags & RTF_GATEWAY) {
+ rt->rt_gwroute = rtalloc1(gate, 1, RTF_PRCLONING);
+ if (rt->rt_gwroute == rt) {
+ RTFREE(rt->rt_gwroute);
+ rt->rt_gwroute = 0;
+ return EDQUOT; /* failure */
+ }
+ }
+
+ /*
+ * This isn't going to do anything useful for host routes, so
+ * don't bother. Also make sure we have a reasonable mask
+ * (we don't yet have one during adds).
+ */
+ if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
+ struct rtfc_arg arg;
+ arg.rnh = rnh;
+ arg.rt0 = rt;
+ rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
+ rt_fixchange, &arg);
+ }
+
+ return 0;
+}
+
+static void
+rt_maskedcopy(src, dst, netmask)
+ struct sockaddr *src, *dst, *netmask;
+{
+ register u_char *cp1 = (u_char *)src;
+ register u_char *cp2 = (u_char *)dst;
+ register u_char *cp3 = (u_char *)netmask;
+ u_char *cplim = cp2 + *cp3;
+ u_char *cplim2 = cp2 + *cp1;
+
+ *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
+ cp3 += 2;
+ if (cplim > cplim2)
+ cplim = cplim2;
+ while (cp2 < cplim)
+ *cp2++ = *cp1++ & *cp3++;
+ if (cp2 < cplim2)
+ bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2));
+}
+
+/*
+ * Set up a routing table entry, normally
+ * for an interface.
+ */
+int
+rtinit(ifa, cmd, flags)
+ register struct ifaddr *ifa;
+ int cmd, flags;
+{
+ register struct rtentry *rt;
+ register struct sockaddr *dst;
+ register struct sockaddr *deldst;
+ struct mbuf *m = 0;
+ struct rtentry *nrt = 0;
+ int error;
+
+ dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
+ /*
+ * If it's a delete, check that if it exists, it's on the correct
+ * interface or we might scrub a route to another ifa which would
+ * be confusing at best and possibly worse.
+ */
+ if (cmd == RTM_DELETE) {
+ /*
+ * It's a delete, so it should already exist..
+ * If it's a net, mask off the host bits
+ * (Assuming we have a mask)
+ */
+ if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
+ m = m_get(M_DONTWAIT, MT_SONAME);
+ if (m == NULL)
+ return(ENOBUFS);
+ deldst = mtod(m, struct sockaddr *);
+ rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
+ dst = deldst;
+ }
+ /*
+ * Get an rtentry that is in the routing tree and
+ * contains the correct info. (if this fails, can't get there).
+ * We set "report" to FALSE so that if it doesn't exist,
+ * it doesn't report an error or clone a route, etc. etc.
+ */
+ rt = rtalloc1(dst, 0, 0UL);
+ if (rt) {
+ /*
+ * OK, so we found the rtentry. It has an extra reference
+ * for us at this stage; we won't need that, so
+ * lop it off now.
+ */
+ rt->rt_refcnt--;
+ if (rt->rt_ifa != ifa) {
+ /*
+ * If the interface in the rtentry doesn't match
+ * the interface we are using, then we don't
+ * want to delete it, so return an error.
+ * This seems to be the only point of
+ * this whole RTM_DELETE clause.
+ */
+ if (m)
+ (void) m_free(m);
+ return (flags & RTF_HOST ? EHOSTUNREACH
+ : ENETUNREACH);
+ }
+ }
+ /* XXX */
+#if 0
+ else {
+ /*
+ * One would think that as we are deleting, and we know
+ * it doesn't exist, we could just return at this point
+ * with an "ELSE" clause, but apparently not..
+ */
+ return (flags & RTF_HOST ? EHOSTUNREACH
+ : ENETUNREACH);
+ }
+#endif
+ }
+ /*
+ * Do the actual request
+ */
+ error = rtrequest(cmd, dst, ifa->ifa_addr, ifa->ifa_netmask,
+ flags | ifa->ifa_flags, &nrt);
+ if (m)
+ (void) m_free(m);
+ /*
+ * If we are deleting, and we found an entry, then
+ * it's been removed from the tree.. now throw it away.
+ */
+ if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
+ /*
+ * notify any listening routing agents of the change
+ */
+ rt_newaddrmsg(cmd, ifa, error, nrt);
+ if (rt->rt_refcnt <= 0) {
+ rt->rt_refcnt++; /* need a 1->0 transition to free */
+ rtfree(rt);
+ }
+ }
+
+ /*
+ * We are adding, and we have a returned routing entry.
+ * We need to sanity check the result.
+ */
+ if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
+ /*
+ * We just wanted to add it; we don't actually need a reference
+ */
+ rt->rt_refcnt--;
+ /*
+ * If it came back with an unexpected interface, then it must
+ * have already existed or something. (XXX)
+ */
+ if (rt->rt_ifa != ifa) {
+ if (!(rt->rt_ifa->ifa_ifp->if_flags &
+ (IFF_POINTOPOINT|IFF_LOOPBACK)))
+ printf("rtinit: wrong ifa (%p) was (%p)\n",
+ ifa, rt->rt_ifa);
+ /*
+ * Ask that the protocol in question
+ * remove anything it has associated with
+ * this route and ifaddr.
+ */
+ if (rt->rt_ifa->ifa_rtrequest)
+ rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, SA(0));
+ /*
+ * Remove the reference to its ifaddr.
+ */
+ IFAFREE(rt->rt_ifa);
+ /*
+ * And substitute in references to the ifaddr
+ * we are adding.
+ */
+ rt->rt_ifa = ifa;
+ rt->rt_ifp = ifa->ifa_ifp;
+ rt->rt_rmx.rmx_mtu = ifa->ifa_ifp->if_mtu; /*XXX*/
+ ifa->ifa_refcnt++;
+ /*
+ * Now ask the protocol to check if it needs
+ * any special processing in its new form.
+ */
+ if (ifa->ifa_rtrequest)
+ ifa->ifa_rtrequest(RTM_ADD, rt, SA(0));
+ }
+ /*
+ * notify any listening routing agents of the change
+ */
+ rt_newaddrmsg(cmd, ifa, error, nrt);
+ }
+ return (error);
+}
+
+SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, route_init, 0);
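[Note] rt_maskedcopy() above builds the key stored in the tree for a network route by ANDing the destination with the netmask byte by byte, so host bits never appear in the key. A simplified, runnable sketch of just the masking step on plain byte arrays follows; the real routine also copies sa_len/sa_family, honours the shorter of the two lengths, and zero-fills past the mask.

#include <stddef.h>
#include <stdio.h>

static void
masked_copy(const unsigned char *src, unsigned char *dst,
    const unsigned char *mask, size_t len)
{
    size_t i;

    for (i = 0; i < len; i++)
        dst[i] = src[i] & mask[i];          /* clear the host bits */
}

int
main(void)
{
    unsigned char addr[4] = { 10, 1, 2, 3 };
    unsigned char mask[4] = { 255, 255, 255, 0 };
    unsigned char key[4];

    masked_copy(addr, key, mask, sizeof(key));
    printf("%u.%u.%u.%u\n", key[0], key[1], key[2], key[3]);    /* 10.1.2.0 */
    return (0);
}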
diff --git a/sys/net/route.h b/sys/net/route.h
new file mode 100644
index 0000000..255fbe3
--- /dev/null
+++ b/sys/net/route.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)route.h 8.3 (Berkeley) 4/19/94
+ * $FreeBSD$
+ */
+
+#ifndef _NET_ROUTE_H_
+#define _NET_ROUTE_H_
+
+/*
+ * Kernel resident routing tables.
+ *
+ * The routing tables are initialized when interface addresses
+ * are set by making entries for all directly connected interfaces.
+ */
+
+/*
+ * A route consists of a destination address and a reference
+ * to a routing entry. These are often held by protocols
+ * in their control blocks, e.g. inpcb.
+ */
+struct route {
+ struct rtentry *ro_rt;
+ struct sockaddr ro_dst;
+};
+
+/*
+ * These numbers are used by reliable protocols for determining
+ * retransmission behavior and are included in the routing structure.
+ */
+struct rt_metrics {
+ u_long rmx_locks; /* Kernel must leave these values alone */
+ u_long rmx_mtu; /* MTU for this path */
+ u_long rmx_hopcount; /* max hops expected */
+ u_long rmx_expire; /* lifetime for route, e.g. redirect */
+ u_long rmx_recvpipe; /* inbound delay-bandwidth product */
+ u_long rmx_sendpipe; /* outbound delay-bandwidth product */
+ u_long rmx_ssthresh; /* outbound gateway buffer limit */
+ u_long rmx_rtt; /* estimated round trip time */
+ u_long rmx_rttvar; /* estimated rtt variance */
+ u_long rmx_pksent; /* packets sent using this route */
+ u_long rmx_filler[4]; /* will be used for T/TCP later */
+};
+
+/*
+ * rmx_rtt and rmx_rttvar are stored as microseconds;
+ * RTTTOPRHZ(rtt) converts to a value suitable for use
+ * by a protocol slowtimo counter.
+ */
+#define RTM_RTTUNIT 1000000 /* units for rtt, rttvar, as units per sec */
+#define RTTTOPRHZ(r) ((r) / (RTM_RTTUNIT / PR_SLOWHZ))
+
+/*
+ * XXX kernel function pointer `rt_output' is visible to applications.
+ */
+struct mbuf;
+
+/*
+ * We distinguish between routes to hosts and routes to networks,
+ * preferring the former if available. For each route we infer
+ * the interface to use from the gateway address supplied when
+ * the route was entered. Routes that forward packets through
+ * gateways are marked so that the output routines know to address the
+ * gateway rather than the ultimate destination.
+ */
+#ifndef RNF_NORMAL
+#include <net/radix.h>
+#endif
+struct rtentry {
+ struct radix_node rt_nodes[2]; /* tree glue, and other values */
+#define rt_key(r) ((struct sockaddr *)((r)->rt_nodes->rn_key))
+#define rt_mask(r) ((struct sockaddr *)((r)->rt_nodes->rn_mask))
+ struct sockaddr *rt_gateway; /* value */
+ long rt_refcnt; /* # held references */
+ u_long rt_flags; /* up/down?, host/net */
+ struct ifnet *rt_ifp; /* the answer: interface to use */
+ struct ifaddr *rt_ifa; /* the answer: interface to use */
+ struct sockaddr *rt_genmask; /* for generation of cloned routes */
+ caddr_t rt_llinfo; /* pointer to link level info cache */
+ struct rt_metrics rt_rmx; /* metrics used by rx'ing protocols */
+ struct rtentry *rt_gwroute; /* implied entry for gatewayed routes */
+ int (*rt_output) __P((struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct rtentry *));
+ /* output routine for this (rt,if) */
+ struct rtentry *rt_parent; /* cloning parent of this route */
+ void *rt_filler2; /* more filler */
+};
+
+/*
+ * Following structure necessary for 4.3 compatibility;
+ * We should eventually move it to a compat file.
+ */
+struct ortentry {
+ u_long rt_hash; /* to speed lookups */
+ struct sockaddr rt_dst; /* key */
+ struct sockaddr rt_gateway; /* value */
+ short rt_flags; /* up/down?, host/net */
+ short rt_refcnt; /* # held references */
+ u_long rt_use; /* raw # packets forwarded */
+ struct ifnet *rt_ifp; /* the answer: interface to use */
+};
+
+#define rt_use rt_rmx.rmx_pksent
+
+#define RTF_UP 0x1 /* route usable */
+#define RTF_GATEWAY 0x2 /* destination is a gateway */
+#define RTF_HOST 0x4 /* host entry (net otherwise) */
+#define RTF_REJECT 0x8 /* host or net unreachable */
+#define RTF_DYNAMIC 0x10 /* created dynamically (by redirect) */
+#define RTF_MODIFIED 0x20 /* modified dynamically (by redirect) */
+#define RTF_DONE 0x40 /* message confirmed */
+/* 0x80 unused */
+#define RTF_CLONING 0x100 /* generate new routes on use */
+#define RTF_XRESOLVE 0x200 /* external daemon resolves name */
+#define RTF_LLINFO 0x400 /* generated by link layer (e.g. ARP) */
+#define RTF_STATIC 0x800 /* manually added */
+#define RTF_BLACKHOLE 0x1000 /* just discard pkts (during updates) */
+#define RTF_PROTO2 0x4000 /* protocol specific routing flag */
+#define RTF_PROTO1 0x8000 /* protocol specific routing flag */
+
+#define RTF_PRCLONING 0x10000 /* protocol requires cloning */
+#define RTF_WASCLONED 0x20000 /* route generated through cloning */
+#define RTF_PROTO3 0x40000 /* protocol specific routing flag */
+/* 0x80000 unused */
+#define RTF_PINNED 0x100000 /* future use */
+#define RTF_LOCAL 0x200000 /* route represents a local address */
+#define RTF_BROADCAST 0x400000 /* route represents a bcast address */
+#define RTF_MULTICAST 0x800000 /* route represents a mcast address */
+ /* 0x1000000 and up unassigned */
+
+/*
+ * Routing statistics.
+ */
+struct rtstat {
+ short rts_badredirect; /* bogus redirect calls */
+ short rts_dynamic; /* routes created by redirects */
+ short rts_newgateway; /* routes modified by redirects */
+ short rts_unreach; /* lookups which failed */
+ short rts_wildcard; /* lookups satisfied by a wildcard */
+};
+/*
+ * Structures for routing messages.
+ */
+struct rt_msghdr {
+ u_short rtm_msglen; /* to skip over non-understood messages */
+ u_char rtm_version; /* future binary compatibility */
+ u_char rtm_type; /* message type */
+ u_short rtm_index; /* index for associated ifp */
+ int rtm_flags; /* flags, incl. kern & message, e.g. DONE */
+ int rtm_addrs; /* bitmask identifying sockaddrs in msg */
+ pid_t rtm_pid; /* identify sender */
+ int rtm_seq; /* for sender to identify action */
+ int rtm_errno; /* why failed */
+ int rtm_use; /* from rtentry */
+ u_long rtm_inits; /* which metrics we are initializing */
+ struct rt_metrics rtm_rmx; /* metrics themselves */
+};
+
+#define RTM_VERSION 5 /* Up the ante and ignore older versions */
+
+/*
+ * Message types.
+ */
+#define RTM_ADD 0x1 /* Add Route */
+#define RTM_DELETE 0x2 /* Delete Route */
+#define RTM_CHANGE 0x3 /* Change Metrics or flags */
+#define RTM_GET 0x4 /* Report Metrics */
+#define RTM_LOSING 0x5 /* Kernel Suspects Partitioning */
+#define RTM_REDIRECT 0x6 /* Told to use different route */
+#define RTM_MISS 0x7 /* Lookup failed on this address */
+#define RTM_LOCK 0x8 /* fix specified metrics */
+#define RTM_OLDADD 0x9 /* caused by SIOCADDRT */
+#define RTM_OLDDEL 0xa /* caused by SIOCDELRT */
+#define RTM_RESOLVE 0xb /* req to resolve dst to LL addr */
+#define RTM_NEWADDR 0xc /* address being added to iface */
+#define RTM_DELADDR 0xd /* address being removed from iface */
+#define RTM_IFINFO 0xe /* iface going up/down etc. */
+#define RTM_NEWMADDR 0xf /* mcast group membership being added to if */
+#define RTM_DELMADDR 0x10 /* mcast group membership being deleted */
+
+/*
+ * Bitmask values for rtm_inits and rmx_locks.
+ */
+#define RTV_MTU 0x1 /* init or lock _mtu */
+#define RTV_HOPCOUNT 0x2 /* init or lock _hopcount */
+#define RTV_EXPIRE 0x4 /* init or lock _expire */
+#define RTV_RPIPE 0x8 /* init or lock _recvpipe */
+#define RTV_SPIPE 0x10 /* init or lock _sendpipe */
+#define RTV_SSTHRESH 0x20 /* init or lock _ssthresh */
+#define RTV_RTT 0x40 /* init or lock _rtt */
+#define RTV_RTTVAR 0x80 /* init or lock _rttvar */
+
+/*
+ * Bitmask values for rtm_addrs.
+ */
+#define RTA_DST 0x1 /* destination sockaddr present */
+#define RTA_GATEWAY 0x2 /* gateway sockaddr present */
+#define RTA_NETMASK 0x4 /* netmask sockaddr present */
+#define RTA_GENMASK 0x8 /* cloning mask sockaddr present */
+#define RTA_IFP 0x10 /* interface name sockaddr present */
+#define RTA_IFA 0x20 /* interface addr sockaddr present */
+#define RTA_AUTHOR 0x40 /* sockaddr for author of redirect */
+#define RTA_BRD 0x80 /* for NEWADDR, broadcast or p-p dest addr */
+
+/*
+ * Index offsets for sockaddr array for alternate internal encoding.
+ */
+#define RTAX_DST 0 /* destination sockaddr present */
+#define RTAX_GATEWAY 1 /* gateway sockaddr present */
+#define RTAX_NETMASK 2 /* netmask sockaddr present */
+#define RTAX_GENMASK 3 /* cloning mask sockaddr present */
+#define RTAX_IFP 4 /* interface name sockaddr present */
+#define RTAX_IFA 5 /* interface addr sockaddr present */
+#define RTAX_AUTHOR 6 /* sockaddr for author of redirect */
+#define RTAX_BRD 7 /* for NEWADDR, broadcast or p-p dest addr */
+#define RTAX_MAX 8 /* size of array to allocate */
+
+struct rt_addrinfo {
+ int rti_addrs;
+ struct sockaddr *rti_info[RTAX_MAX];
+};
+
+struct route_cb {
+ int ip_count;
+ int ip6_count;
+ int ipx_count;
+ int ns_count;
+ int iso_count;
+ int any_count;
+};
+
+#ifdef KERNEL
+#define RTFREE(rt) \
+ do { \
+ if ((rt)->rt_refcnt <= 1) \
+ rtfree(rt); \
+ else \
+ (rt)->rt_refcnt--; \
+ } while (0)
+
+extern struct route_cb route_cb;
+extern struct radix_node_head *rt_tables[AF_MAX+1];
+
+struct ifmultiaddr;
+struct proc;
+
+void route_init __P((void));
+void rt_ifmsg __P((struct ifnet *));
+void rt_missmsg __P((int, struct rt_addrinfo *, int, int));
+void rt_newaddrmsg __P((int, struct ifaddr *, int, struct rtentry *));
+void rt_newmaddrmsg __P((int, struct ifmultiaddr *));
+int rt_setgate __P((struct rtentry *,
+ struct sockaddr *, struct sockaddr *));
+void rtalloc __P((struct route *));
+void rtalloc_ign __P((struct route *, u_long));
+struct rtentry *
+ rtalloc1 __P((struct sockaddr *, int, u_long));
+void rtfree __P((struct rtentry *));
+int rtinit __P((struct ifaddr *, int, int));
+int rtioctl __P((int, caddr_t, struct proc *));
+void rtredirect __P((struct sockaddr *, struct sockaddr *,
+ struct sockaddr *, int, struct sockaddr *, struct rtentry **));
+int rtrequest __P((int, struct sockaddr *,
+ struct sockaddr *, struct sockaddr *, int, struct rtentry **));
+#endif
+
+#endif
diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c
new file mode 100644
index 0000000..0c02e78
--- /dev/null
+++ b/sys/net/rtsock.c
@@ -0,0 +1,1002 @@
+/*
+ * Copyright (c) 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)rtsock.c 8.5 (Berkeley) 11/2/94
+ * $FreeBSD$
+ */
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+
+#include <net/if.h>
+#include <net/route.h>
+#include <net/raw_cb.h>
+
+MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
+
+static struct sockaddr route_dst = { 2, PF_ROUTE, };
+static struct sockaddr route_src = { 2, PF_ROUTE, };
+static struct sockaddr sa_zero = { sizeof(sa_zero), AF_INET, };
+static struct sockproto route_proto = { PF_ROUTE, };
+
+struct walkarg {
+ int w_tmemsize;
+ int w_op, w_arg;
+ caddr_t w_tmem;
+ struct sysctl_req *w_req;
+};
+
+static struct mbuf *
+ rt_msg1 __P((int, struct rt_addrinfo *));
+static int rt_msg2 __P((int,
+ struct rt_addrinfo *, caddr_t, struct walkarg *));
+static int rt_xaddrs __P((caddr_t, caddr_t, struct rt_addrinfo *));
+static int sysctl_dumpentry __P((struct radix_node *rn, void *vw));
+static int sysctl_iflist __P((int af, struct walkarg *w));
+static int route_output __P((struct mbuf *, struct socket *));
+static void rt_setmetrics __P((u_long, struct rt_metrics *, struct rt_metrics *));
+
+/* Sleazy use of local variables throughout file, warning!!!! */
+#define dst info.rti_info[RTAX_DST]
+#define gate info.rti_info[RTAX_GATEWAY]
+#define netmask info.rti_info[RTAX_NETMASK]
+#define genmask info.rti_info[RTAX_GENMASK]
+#define ifpaddr info.rti_info[RTAX_IFP]
+#define ifaaddr info.rti_info[RTAX_IFA]
+#define brdaddr info.rti_info[RTAX_BRD]
+
+/*
+ * It really doesn't make any sense at all for this code to share much
+ * with raw_usrreq.c, since its functionality is so restricted. XXX
+ */
+static int
+rts_abort(struct socket *so)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_abort(so);
+ splx(s);
+ return error;
+}
+
+/* pru_accept is EOPNOTSUPP */
+
+static int
+rts_attach(struct socket *so, int proto, struct proc *p)
+{
+ struct rawcb *rp;
+ int s, error;
+
+ if (sotorawcb(so) != 0)
+ return EISCONN; /* XXX panic? */
+ MALLOC(rp, struct rawcb *, sizeof *rp, M_PCB, M_WAITOK); /* XXX */
+ if (rp == 0)
+ return ENOBUFS;
+ bzero(rp, sizeof *rp);
+
+ /*
+ * The splnet() is necessary to block protocols from sending
+ * error notifications (like RTM_REDIRECT or RTM_LOSING) while
+ * this PCB is extant but incompletely initialized.
+ * Probably we should try to do more of this work beforehand and
+ * eliminate the spl.
+ */
+ s = splnet();
+ so->so_pcb = (caddr_t)rp;
+ error = raw_usrreqs.pru_attach(so, proto, p);
+ rp = sotorawcb(so);
+ if (error) {
+ splx(s);
+ free(rp, M_PCB);
+ return error;
+ }
+ switch(rp->rcb_proto.sp_protocol) {
+ case AF_INET:
+ route_cb.ip_count++;
+ break;
+ case AF_IPX:
+ route_cb.ipx_count++;
+ break;
+ case AF_NS:
+ route_cb.ns_count++;
+ break;
+ case AF_ISO:
+ route_cb.iso_count++;
+ break;
+ }
+ rp->rcb_faddr = &route_src;
+ route_cb.any_count++;
+ soisconnected(so);
+ so->so_options |= SO_USELOOPBACK;
+ splx(s);
+ return 0;
+}
+
+static int
+rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_bind(so, nam, p); /* xxx just EINVAL */
+ splx(s);
+ return error;
+}
+
+static int
+rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_connect(so, nam, p); /* XXX just EINVAL */
+ splx(s);
+ return error;
+}
+
+/* pru_connect2 is EOPNOTSUPP */
+/* pru_control is EOPNOTSUPP */
+
+static int
+rts_detach(struct socket *so)
+{
+ struct rawcb *rp = sotorawcb(so);
+ int s, error;
+
+ s = splnet();
+ if (rp != 0) {
+ switch(rp->rcb_proto.sp_protocol) {
+ case AF_INET:
+ route_cb.ip_count--;
+ break;
+ case AF_IPX:
+ route_cb.ipx_count--;
+ break;
+ case AF_NS:
+ route_cb.ns_count--;
+ break;
+ case AF_ISO:
+ route_cb.iso_count--;
+ break;
+ }
+ route_cb.any_count--;
+ }
+ error = raw_usrreqs.pru_detach(so);
+ splx(s);
+ return error;
+}
+
+static int
+rts_disconnect(struct socket *so)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_disconnect(so);
+ splx(s);
+ return error;
+}
+
+/* pru_listen is EOPNOTSUPP */
+
+static int
+rts_peeraddr(struct socket *so, struct sockaddr **nam)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_peeraddr(so, nam);
+ splx(s);
+ return error;
+}
+
+/* pru_rcvd is EOPNOTSUPP */
+/* pru_rcvoob is EOPNOTSUPP */
+
+static int
+rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, struct proc *p)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_send(so, flags, m, nam, control, p);
+ splx(s);
+ return error;
+}
+
+/* pru_sense is null */
+
+static int
+rts_shutdown(struct socket *so)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_shutdown(so);
+ splx(s);
+ return error;
+}
+
+static int
+rts_sockaddr(struct socket *so, struct sockaddr **nam)
+{
+ int s, error;
+ s = splnet();
+ error = raw_usrreqs.pru_sockaddr(so, nam);
+ splx(s);
+ return error;
+}
+
+static struct pr_usrreqs route_usrreqs = {
+ rts_abort, pru_accept_notsupp, rts_attach, rts_bind, rts_connect,
+ pru_connect2_notsupp, pru_control_notsupp, rts_detach, rts_disconnect,
+ pru_listen_notsupp, rts_peeraddr, pru_rcvd_notsupp, pru_rcvoob_notsupp,
+ rts_send, pru_sense_null, rts_shutdown, rts_sockaddr,
+ sosend, soreceive, sopoll
+};
+
+/*ARGSUSED*/
+static int
+route_output(m, so)
+ register struct mbuf *m;
+ struct socket *so;
+{
+ register struct rt_msghdr *rtm = 0;
+ register struct rtentry *rt = 0;
+ struct rtentry *saved_nrt = 0;
+ struct radix_node_head *rnh;
+ struct rt_addrinfo info;
+ int len, error = 0;
+ struct ifnet *ifp = 0;
+ struct ifaddr *ifa = 0;
+
+#define senderr(e) { error = e; goto flush;}
+ if (m == 0 || ((m->m_len < sizeof(long)) &&
+ (m = m_pullup(m, sizeof(long))) == 0))
+ return (ENOBUFS);
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("route_output");
+ len = m->m_pkthdr.len;
+ if (len < sizeof(*rtm) ||
+ len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
+ dst = 0;
+ senderr(EINVAL);
+ }
+ R_Malloc(rtm, struct rt_msghdr *, len);
+ if (rtm == 0) {
+ dst = 0;
+ senderr(ENOBUFS);
+ }
+ m_copydata(m, 0, len, (caddr_t)rtm);
+ if (rtm->rtm_version != RTM_VERSION) {
+ dst = 0;
+ senderr(EPROTONOSUPPORT);
+ }
+ rtm->rtm_pid = curproc->p_pid;
+ info.rti_addrs = rtm->rtm_addrs;
+ if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) {
+ dst = 0;
+ senderr(EINVAL);
+ }
+ if (dst == 0 || (dst->sa_family >= AF_MAX)
+ || (gate != 0 && (gate->sa_family >= AF_MAX)))
+ senderr(EINVAL);
+ if (genmask) {
+ struct radix_node *t;
+ t = rn_addmask((caddr_t)genmask, 0, 1);
+ if (t && Bcmp(genmask, t->rn_key, *(u_char *)genmask) == 0)
+ genmask = (struct sockaddr *)(t->rn_key);
+ else
+ senderr(ENOBUFS);
+ }
+ switch (rtm->rtm_type) {
+
+ case RTM_ADD:
+ if (gate == 0)
+ senderr(EINVAL);
+ error = rtrequest(RTM_ADD, dst, gate, netmask,
+ rtm->rtm_flags, &saved_nrt);
+ if (error == 0 && saved_nrt) {
+ rt_setmetrics(rtm->rtm_inits,
+ &rtm->rtm_rmx, &saved_nrt->rt_rmx);
+ saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
+ saved_nrt->rt_rmx.rmx_locks |=
+ (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
+ saved_nrt->rt_refcnt--;
+ saved_nrt->rt_genmask = genmask;
+ }
+ break;
+
+ case RTM_DELETE:
+ error = rtrequest(RTM_DELETE, dst, gate, netmask,
+ rtm->rtm_flags, &saved_nrt);
+ if (error == 0) {
+ if ((rt = saved_nrt))
+ rt->rt_refcnt++;
+ goto report;
+ }
+ break;
+
+ case RTM_GET:
+ case RTM_CHANGE:
+ case RTM_LOCK:
+ if ((rnh = rt_tables[dst->sa_family]) == 0) {
+ senderr(EAFNOSUPPORT);
+ } else if ((rt = (struct rtentry *)
+ rnh->rnh_lookup(dst, netmask, rnh)) != NULL)
+ rt->rt_refcnt++;
+ else
+ senderr(ESRCH);
+ switch(rtm->rtm_type) {
+
+ case RTM_GET:
+ report:
+ dst = rt_key(rt);
+ gate = rt->rt_gateway;
+ netmask = rt_mask(rt);
+ genmask = rt->rt_genmask;
+ if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
+ ifp = rt->rt_ifp;
+ if (ifp) {
+ ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr;
+ ifaaddr = rt->rt_ifa->ifa_addr;
+ rtm->rtm_index = ifp->if_index;
+ } else {
+ ifpaddr = 0;
+ ifaaddr = 0;
+ }
+ }
+ len = rt_msg2(rtm->rtm_type, &info, (caddr_t)0,
+ (struct walkarg *)0);
+ if (len > rtm->rtm_msglen) {
+ struct rt_msghdr *new_rtm;
+ R_Malloc(new_rtm, struct rt_msghdr *, len);
+ if (new_rtm == 0)
+ senderr(ENOBUFS);
+ Bcopy(rtm, new_rtm, rtm->rtm_msglen);
+ Free(rtm); rtm = new_rtm;
+ }
+ (void)rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
+ (struct walkarg *)0);
+ rtm->rtm_flags = rt->rt_flags;
+ rtm->rtm_rmx = rt->rt_rmx;
+ rtm->rtm_addrs = info.rti_addrs;
+ break;
+
+ case RTM_CHANGE:
+ if (gate && (error = rt_setgate(rt, rt_key(rt), gate)))
+ senderr(error);
+
+ /*
+ * If they tried to change things but didn't specify
+ * the required gateway, then just use the old one.
+ * This can happen if the user tries to change the
+ * flags on the default route without changing the
+ * default gateway. Changing flags still doesn't work.
+ */
+ if ((rt->rt_flags & RTF_GATEWAY) && !gate)
+ gate = rt->rt_gateway;
+
+ /* new gateway could require new ifaddr, ifp;
+ flags may also be different; ifp may be specified
+ by ll sockaddr when protocol address is ambiguous */
+ if (ifpaddr && (ifa = ifa_ifwithnet(ifpaddr)) &&
+ (ifp = ifa->ifa_ifp) && (ifaaddr || gate))
+ ifa = ifaof_ifpforaddr(ifaaddr ? ifaaddr : gate,
+ ifp);
+ else if ((ifaaddr && (ifa = ifa_ifwithaddr(ifaaddr))) ||
+ (gate && (ifa = ifa_ifwithroute(rt->rt_flags,
+ rt_key(rt), gate))))
+ ifp = ifa->ifa_ifp;
+ if (ifa) {
+ register struct ifaddr *oifa = rt->rt_ifa;
+ if (oifa != ifa) {
+ if (oifa && oifa->ifa_rtrequest)
+ oifa->ifa_rtrequest(RTM_DELETE,
+ rt, gate);
+ IFAFREE(rt->rt_ifa);
+ rt->rt_ifa = ifa;
+ ifa->ifa_refcnt++;
+ rt->rt_ifp = ifp;
+ }
+ }
+ rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx,
+ &rt->rt_rmx);
+ if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
+ rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, gate);
+ if (genmask)
+ rt->rt_genmask = genmask;
+ /*
+			 * Fall through to the RTM_LOCK case below.
+ */
+ case RTM_LOCK:
+ rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
+ rt->rt_rmx.rmx_locks |=
+ (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
+ break;
+ }
+ break;
+
+ default:
+ senderr(EOPNOTSUPP);
+ }
+
+flush:
+ if (rtm) {
+ if (error)
+ rtm->rtm_errno = error;
+ else
+ rtm->rtm_flags |= RTF_DONE;
+ }
+ if (rt)
+ rtfree(rt);
+ {
+ register struct rawcb *rp = 0;
+ /*
+ * Check to see if we don't want our own messages.
+ */
+ if ((so->so_options & SO_USELOOPBACK) == 0) {
+ if (route_cb.any_count <= 1) {
+ if (rtm)
+ Free(rtm);
+ m_freem(m);
+ return (error);
+ }
+ /* There is another listener, so construct message */
+ rp = sotorawcb(so);
+ }
+ if (rtm) {
+ m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
+ Free(rtm);
+ }
+ if (rp)
+ rp->rcb_proto.sp_family = 0; /* Avoid us */
+ if (dst)
+ route_proto.sp_protocol = dst->sa_family;
+ raw_input(m, &route_proto, &route_src, &route_dst);
+ if (rp)
+ rp->rcb_proto.sp_family = PF_ROUTE;
+ }
+ return (error);
+}
+
+static void
+rt_setmetrics(which, in, out)
+ u_long which;
+ register struct rt_metrics *in, *out;
+{
+#define metric(f, e) if (which & (f)) out->e = in->e;
+ metric(RTV_RPIPE, rmx_recvpipe);
+ metric(RTV_SPIPE, rmx_sendpipe);
+ metric(RTV_SSTHRESH, rmx_ssthresh);
+ metric(RTV_RTT, rmx_rtt);
+ metric(RTV_RTTVAR, rmx_rttvar);
+ metric(RTV_HOPCOUNT, rmx_hopcount);
+ metric(RTV_MTU, rmx_mtu);
+ metric(RTV_EXPIRE, rmx_expire);
+#undef metric
+}
+
+#define ROUNDUP(a) \
+ ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
+#define ADVANCE(x, n) (x += ROUNDUP((n)->sa_len))
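+
+/*
+ * Illustrative sketch (example only): how the padding works.  The values
+ * below assume a machine where sizeof(long) == 8; on a 32-bit machine the
+ * boundary is 4 instead.
+ */
+#if 0
+static void
+roundup_example(void)			/* hypothetical, illustration only */
+{
+	int a = ROUNDUP(0);	/* == 8: an empty slot still occupies one long */
+	int b = ROUNDUP(16);	/* == 16: sizeof(struct sockaddr_in), already aligned */
+	int c = ROUNDUP(17);	/* == 24: rounded up to the next long boundary */
+}
+#endif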
+
+
+/*
+ * Extract the addresses of the passed sockaddrs.
+ * Do a little sanity checking so as to avoid bad memory references.
+ * This data is derived straight from userland.
+ */
+static int
+rt_xaddrs(cp, cplim, rtinfo)
+ register caddr_t cp, cplim;
+ register struct rt_addrinfo *rtinfo;
+{
+ register struct sockaddr *sa;
+ register int i;
+
+ bzero(rtinfo->rti_info, sizeof(rtinfo->rti_info));
+ for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
+ if ((rtinfo->rti_addrs & (1 << i)) == 0)
+ continue;
+ sa = (struct sockaddr *)cp;
+ /*
+ * It won't fit.
+ */
+ if ( (cp + sa->sa_len) > cplim ) {
+ return (EINVAL);
+ }
+
+ /*
+		 * A zero sa_len means there are no more sockaddrs; quit now.
+		 * If more bits are still set in rti_addrs, they are in error.
+		 * I've seen this: route(1) can evidently generate such
+		 * messages, and dereferencing them makes the kernel core dump.
+		 * For compatibility, if we see this, point to a safe address.
+ */
+ if (sa->sa_len == 0) {
+ rtinfo->rti_info[i] = &sa_zero;
+ return (0); /* should be EINVAL but for compat */
+ }
+
+ /* accept it */
+ rtinfo->rti_info[i] = sa;
+ ADVANCE(cp, sa);
+ }
+ return (0);
+}
+
+static struct mbuf *
+rt_msg1(type, rtinfo)
+ int type;
+ register struct rt_addrinfo *rtinfo;
+{
+ register struct rt_msghdr *rtm;
+ register struct mbuf *m;
+ register int i;
+ register struct sockaddr *sa;
+ int len, dlen;
+
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == 0)
+ return (m);
+ switch (type) {
+
+ case RTM_DELADDR:
+ case RTM_NEWADDR:
+ len = sizeof(struct ifa_msghdr);
+ break;
+
+ case RTM_DELMADDR:
+ case RTM_NEWMADDR:
+ len = sizeof(struct ifma_msghdr);
+ break;
+
+ case RTM_IFINFO:
+ len = sizeof(struct if_msghdr);
+ break;
+
+ default:
+ len = sizeof(struct rt_msghdr);
+ }
+ if (len > MHLEN)
+ panic("rt_msg1");
+ m->m_pkthdr.len = m->m_len = len;
+ m->m_pkthdr.rcvif = 0;
+ rtm = mtod(m, struct rt_msghdr *);
+ bzero((caddr_t)rtm, len);
+ for (i = 0; i < RTAX_MAX; i++) {
+ if ((sa = rtinfo->rti_info[i]) == NULL)
+ continue;
+ rtinfo->rti_addrs |= (1 << i);
+ dlen = ROUNDUP(sa->sa_len);
+ m_copyback(m, len, dlen, (caddr_t)sa);
+ len += dlen;
+ }
+ if (m->m_pkthdr.len != len) {
+ m_freem(m);
+ return (NULL);
+ }
+ rtm->rtm_msglen = len;
+ rtm->rtm_version = RTM_VERSION;
+ rtm->rtm_type = type;
+ return (m);
+}
+
+static int
+rt_msg2(type, rtinfo, cp, w)
+ int type;
+ register struct rt_addrinfo *rtinfo;
+ caddr_t cp;
+ struct walkarg *w;
+{
+ register int i;
+ int len, dlen, second_time = 0;
+ caddr_t cp0;
+
+ rtinfo->rti_addrs = 0;
+again:
+ switch (type) {
+
+ case RTM_DELADDR:
+ case RTM_NEWADDR:
+ len = sizeof(struct ifa_msghdr);
+ break;
+
+ case RTM_IFINFO:
+ len = sizeof(struct if_msghdr);
+ break;
+
+ default:
+ len = sizeof(struct rt_msghdr);
+ }
+ cp0 = cp;
+ if (cp0)
+ cp += len;
+ for (i = 0; i < RTAX_MAX; i++) {
+ register struct sockaddr *sa;
+
+ if ((sa = rtinfo->rti_info[i]) == 0)
+ continue;
+ rtinfo->rti_addrs |= (1 << i);
+ dlen = ROUNDUP(sa->sa_len);
+ if (cp) {
+ bcopy((caddr_t)sa, cp, (unsigned)dlen);
+ cp += dlen;
+ }
+ len += dlen;
+ }
+ if (cp == 0 && w != NULL && !second_time) {
+ register struct walkarg *rw = w;
+
+ if (rw->w_req) {
+ if (rw->w_tmemsize < len) {
+ if (rw->w_tmem)
+ free(rw->w_tmem, M_RTABLE);
+ rw->w_tmem = (caddr_t)
+ malloc(len, M_RTABLE, M_NOWAIT);
+ if (rw->w_tmem)
+ rw->w_tmemsize = len;
+ }
+ if (rw->w_tmem) {
+ cp = rw->w_tmem;
+ second_time = 1;
+ goto again;
+ }
+ }
+ }
+ if (cp) {
+ register struct rt_msghdr *rtm = (struct rt_msghdr *)cp0;
+
+ rtm->rtm_version = RTM_VERSION;
+ rtm->rtm_type = type;
+ rtm->rtm_msglen = len;
+ }
+ return (len);
+}
+
+/*
+ * This routine is called to generate a message from the routing
+ * socket indicating that a redirect has occurred, a routing lookup
+ * has failed, or that a protocol has detected timeouts to a particular
+ * destination.
+ */
+void
+rt_missmsg(type, rtinfo, flags, error)
+ int type, flags, error;
+ register struct rt_addrinfo *rtinfo;
+{
+ register struct rt_msghdr *rtm;
+ register struct mbuf *m;
+ struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
+
+ if (route_cb.any_count == 0)
+ return;
+ m = rt_msg1(type, rtinfo);
+ if (m == 0)
+ return;
+ rtm = mtod(m, struct rt_msghdr *);
+ rtm->rtm_flags = RTF_DONE | flags;
+ rtm->rtm_errno = error;
+ rtm->rtm_addrs = rtinfo->rti_addrs;
+ route_proto.sp_protocol = sa ? sa->sa_family : 0;
+ raw_input(m, &route_proto, &route_src, &route_dst);
+}
+
+/*
+ * This routine is called to generate a message from the routing
+ * socket indicating that the status of a network interface has changed.
+ */
+void
+rt_ifmsg(ifp)
+ register struct ifnet *ifp;
+{
+ register struct if_msghdr *ifm;
+ struct mbuf *m;
+ struct rt_addrinfo info;
+
+ if (route_cb.any_count == 0)
+ return;
+ bzero((caddr_t)&info, sizeof(info));
+ m = rt_msg1(RTM_IFINFO, &info);
+ if (m == 0)
+ return;
+ ifm = mtod(m, struct if_msghdr *);
+ ifm->ifm_index = ifp->if_index;
+ ifm->ifm_flags = (u_short)ifp->if_flags;
+ ifm->ifm_data = ifp->if_data;
+ ifm->ifm_addrs = 0;
+ route_proto.sp_protocol = 0;
+ raw_input(m, &route_proto, &route_src, &route_dst);
+}
+
+/*
+ * This is called to generate messages from the routing socket
+ * indicating a network interface has had addresses associated with it.
+ * If we ever reverse the logic so that messages TO the routing
+ * socket indicate a request to configure interfaces, then this
+ * will be unnecessary as the routing socket will automatically
+ * generate copies of it.
+ */
+void
+rt_newaddrmsg(cmd, ifa, error, rt)
+ int cmd, error;
+ register struct ifaddr *ifa;
+ register struct rtentry *rt;
+{
+ struct rt_addrinfo info;
+ struct sockaddr *sa = 0;
+ int pass;
+ struct mbuf *m = 0;
+ struct ifnet *ifp = ifa->ifa_ifp;
+
+ if (route_cb.any_count == 0)
+ return;
+ for (pass = 1; pass < 3; pass++) {
+ bzero((caddr_t)&info, sizeof(info));
+ if ((cmd == RTM_ADD && pass == 1) ||
+ (cmd == RTM_DELETE && pass == 2)) {
+ register struct ifa_msghdr *ifam;
+ int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;
+
+ ifaaddr = sa = ifa->ifa_addr;
+ ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr;
+ netmask = ifa->ifa_netmask;
+ brdaddr = ifa->ifa_dstaddr;
+ if ((m = rt_msg1(ncmd, &info)) == NULL)
+ continue;
+ ifam = mtod(m, struct ifa_msghdr *);
+ ifam->ifam_index = ifp->if_index;
+ ifam->ifam_metric = ifa->ifa_metric;
+ ifam->ifam_flags = ifa->ifa_flags;
+ ifam->ifam_addrs = info.rti_addrs;
+ }
+ if ((cmd == RTM_ADD && pass == 2) ||
+ (cmd == RTM_DELETE && pass == 1)) {
+ register struct rt_msghdr *rtm;
+
+ if (rt == 0)
+ continue;
+ netmask = rt_mask(rt);
+ dst = sa = rt_key(rt);
+ gate = rt->rt_gateway;
+ if ((m = rt_msg1(cmd, &info)) == NULL)
+ continue;
+ rtm = mtod(m, struct rt_msghdr *);
+ rtm->rtm_index = ifp->if_index;
+ rtm->rtm_flags |= rt->rt_flags;
+ rtm->rtm_errno = error;
+ rtm->rtm_addrs = info.rti_addrs;
+ }
+ route_proto.sp_protocol = sa ? sa->sa_family : 0;
+ raw_input(m, &route_proto, &route_src, &route_dst);
+ }
+}
+
+/*
+ * This is the analogue of rt_newaddrmsg which performs the same
+ * function but for multicast group memberships. This is easier since
+ * there is no route state to worry about.
+ */
+void
+rt_newmaddrmsg(cmd, ifma)
+ int cmd;
+ struct ifmultiaddr *ifma;
+{
+ struct rt_addrinfo info;
+ struct mbuf *m = 0;
+ struct ifnet *ifp = ifma->ifma_ifp;
+ struct ifma_msghdr *ifmam;
+
+ if (route_cb.any_count == 0)
+ return;
+
+ bzero((caddr_t)&info, sizeof(info));
+ ifaaddr = ifma->ifma_addr;
+ ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr;
+ /*
+ * If a link-layer address is present, present it as a ``gateway''
+ * (similarly to how ARP entries, e.g., are presented).
+ */
+ gate = ifma->ifma_lladdr;
+ if ((m = rt_msg1(cmd, &info)) == NULL)
+ return;
+ ifmam = mtod(m, struct ifma_msghdr *);
+ ifmam->ifmam_index = ifp->if_index;
+ ifmam->ifmam_addrs = info.rti_addrs;
+ route_proto.sp_protocol = ifma->ifma_addr->sa_family;
+ raw_input(m, &route_proto, &route_src, &route_dst);
+}
+
+/*
+ * This is used in dumping the kernel table via sysctl().
+ */
+int
+sysctl_dumpentry(rn, vw)
+ struct radix_node *rn;
+ void *vw;
+{
+ register struct walkarg *w = vw;
+ register struct rtentry *rt = (struct rtentry *)rn;
+ int error = 0, size;
+ struct rt_addrinfo info;
+
+ if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
+ return 0;
+ bzero((caddr_t)&info, sizeof(info));
+ dst = rt_key(rt);
+ gate = rt->rt_gateway;
+ netmask = rt_mask(rt);
+ genmask = rt->rt_genmask;
+ size = rt_msg2(RTM_GET, &info, 0, w);
+ if (w->w_req && w->w_tmem) {
+ register struct rt_msghdr *rtm = (struct rt_msghdr *)w->w_tmem;
+
+ rtm->rtm_flags = rt->rt_flags;
+ rtm->rtm_use = rt->rt_use;
+ rtm->rtm_rmx = rt->rt_rmx;
+ rtm->rtm_index = rt->rt_ifp->if_index;
+ rtm->rtm_errno = rtm->rtm_pid = rtm->rtm_seq = 0;
+ rtm->rtm_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
+ return (error);
+ }
+ return (error);
+}
+
+int
+sysctl_iflist(af, w)
+ int af;
+ register struct walkarg *w;
+{
+ register struct ifnet *ifp;
+ register struct ifaddr *ifa;
+ struct rt_addrinfo info;
+ int len, error = 0;
+
+ bzero((caddr_t)&info, sizeof(info));
+ for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) {
+ if (w->w_arg && w->w_arg != ifp->if_index)
+ continue;
+ ifa = ifp->if_addrhead.tqh_first;
+ ifpaddr = ifa->ifa_addr;
+ len = rt_msg2(RTM_IFINFO, &info, (caddr_t)0, w);
+ ifpaddr = 0;
+ if (w->w_req && w->w_tmem) {
+ register struct if_msghdr *ifm;
+
+ ifm = (struct if_msghdr *)w->w_tmem;
+ ifm->ifm_index = ifp->if_index;
+ ifm->ifm_flags = (u_short)ifp->if_flags;
+ ifm->ifm_data = ifp->if_data;
+ ifm->ifm_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req,(caddr_t)ifm, len);
+ if (error)
+ return (error);
+ }
+ while ((ifa = ifa->ifa_link.tqe_next) != 0) {
+ if (af && af != ifa->ifa_addr->sa_family)
+ continue;
+ if (curproc->p_prison && prison_if(curproc, ifa->ifa_addr))
+ continue;
+ ifaaddr = ifa->ifa_addr;
+ netmask = ifa->ifa_netmask;
+ brdaddr = ifa->ifa_dstaddr;
+ len = rt_msg2(RTM_NEWADDR, &info, 0, w);
+ if (w->w_req && w->w_tmem) {
+ register struct ifa_msghdr *ifam;
+
+ ifam = (struct ifa_msghdr *)w->w_tmem;
+ ifam->ifam_index = ifa->ifa_ifp->if_index;
+ ifam->ifam_flags = ifa->ifa_flags;
+ ifam->ifam_metric = ifa->ifa_metric;
+ ifam->ifam_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, w->w_tmem, len);
+ if (error)
+ return (error);
+ }
+ }
+ ifaaddr = netmask = brdaddr = 0;
+ }
+ return (0);
+}
+
+static int
+sysctl_rtsock SYSCTL_HANDLER_ARGS
+{
+ int *name = (int *)arg1;
+ u_int namelen = arg2;
+ register struct radix_node_head *rnh;
+ int i, s, error = EINVAL;
+ u_char af;
+ struct walkarg w;
+
+ name ++;
+ namelen--;
+ if (req->newptr)
+ return (EPERM);
+ if (namelen != 3)
+ return (EINVAL);
+ af = name[0];
+ Bzero(&w, sizeof(w));
+ w.w_op = name[1];
+ w.w_arg = name[2];
+ w.w_req = req;
+
+ s = splnet();
+ switch (w.w_op) {
+
+ case NET_RT_DUMP:
+ case NET_RT_FLAGS:
+ for (i = 1; i <= AF_MAX; i++)
+ if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
+ (error = rnh->rnh_walktree(rnh,
+ sysctl_dumpentry, &w)))
+ break;
+ break;
+
+ case NET_RT_IFLIST:
+ error = sysctl_iflist(af, &w);
+ }
+ splx(s);
+ if (w.w_tmem)
+ free(w.w_tmem, M_RTABLE);
+ return (error);
+}
+
+SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD, sysctl_rtsock, "");
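+
+/*
+ * Illustrative sketch (example only, userland): the sysctl node above is
+ * reached with the MIB {CTL_NET, PF_ROUTE, 0, af, op, arg}; sysctl_rtsock()
+ * skips the leading 0 and interprets the remaining three names.  The helper
+ * name below is hypothetical.
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <net/route.h>
+#include <stdlib.h>
+
+static size_t
+rtdump_example(char **bufp)		/* hypothetical helper */
+{
+	int mib[6] = { CTL_NET, PF_ROUTE, 0, AF_INET, NET_RT_DUMP, 0 };
+	size_t needed;
+
+	/* First call sizes the dump; second call fetches it. */
+	if (sysctl(mib, 6, NULL, &needed, NULL, 0) < 0)
+		return (0);
+	if ((*bufp = malloc(needed)) == NULL)
+		return (0);
+	if (sysctl(mib, 6, *bufp, &needed, NULL, 0) < 0)
+		return (0);
+	/* *bufp now holds a sequence of rt_msghdrs built by rt_msg2(). */
+	return (needed);
+}
+#endif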
+
+/*
+ * Definitions of protocols supported in the ROUTE domain.
+ */
+
+extern struct domain routedomain; /* or at least forward */
+
+static struct protosw routesw[] = {
+{ SOCK_RAW, &routedomain, 0, PR_ATOMIC|PR_ADDR,
+ 0, route_output, raw_ctlinput, 0,
+ 0,
+ raw_init, 0, 0, 0,
+ &route_usrreqs
+}
+};
+
+static struct domain routedomain =
+ { PF_ROUTE, "route", 0, 0, 0,
+ routesw, &routesw[sizeof(routesw)/sizeof(routesw[0])] };
+
+DOMAIN_SET(route);
diff --git a/sys/net/slcompress.c b/sys/net/slcompress.c
new file mode 100644
index 0000000..200e9c1
--- /dev/null
+++ b/sys/net/slcompress.c
@@ -0,0 +1,614 @@
+/*-
+ * Copyright (c) 1989, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)slcompress.c 8.2 (Berkeley) 4/16/94
+ * $FreeBSD$
+ */
+
+/*
+ * Routines to compress and uncompress tcp packets (for transmission
+ * over low speed serial lines).
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include <net/slcompress.h>
+
+#ifndef SL_NO_STATS
+#define INCR(counter) ++comp->counter;
+#else
+#define INCR(counter)
+#endif
+
+#define BCMP(p1, p2, n) bcmp((char *)(p1), (char *)(p2), (int)(n))
+#define BCOPY(p1, p2, n) bcopy((char *)(p1), (char *)(p2), (int)(n))
+#ifndef KERNEL
+#define ovbcopy bcopy
+#endif
+
+void
+sl_compress_init(comp, max_state)
+ struct slcompress *comp;
+ int max_state;
+{
+ register u_int i;
+ register struct cstate *tstate = comp->tstate;
+
+ if (max_state == -1) {
+ max_state = MAX_STATES - 1;
+ bzero((char *)comp, sizeof(*comp));
+ } else {
+ /* Don't reset statistics */
+ bzero((char *)comp->tstate, sizeof(comp->tstate));
+ bzero((char *)comp->rstate, sizeof(comp->rstate));
+ }
+ for (i = max_state; i > 0; --i) {
+ tstate[i].cs_id = i;
+ tstate[i].cs_next = &tstate[i - 1];
+ }
+ tstate[0].cs_next = &tstate[max_state];
+ tstate[0].cs_id = 0;
+ comp->last_cs = &tstate[0];
+ comp->last_recv = 255;
+ comp->last_xmit = 255;
+ comp->flags = SLF_TOSS;
+}
+
+
+/* ENCODE encodes a number that is known to be non-zero. ENCODEZ
+ * checks for zero (since zero has to be encoded in the long, 3 byte
+ * form).
+ */
+#define ENCODE(n) { \
+ if ((u_int16_t)(n) >= 256) { \
+ *cp++ = 0; \
+ cp[1] = (n); \
+ cp[0] = (n) >> 8; \
+ cp += 2; \
+ } else { \
+ *cp++ = (n); \
+ } \
+}
+#define ENCODEZ(n) { \
+ if ((u_int16_t)(n) >= 256 || (u_int16_t)(n) == 0) { \
+ *cp++ = 0; \
+ cp[1] = (n); \
+ cp[0] = (n) >> 8; \
+ cp += 2; \
+ } else { \
+ *cp++ = (n); \
+ } \
+}
+
+#define DECODEL(f) { \
+ if (*cp == 0) {\
+ (f) = htonl(ntohl(f) + ((cp[1] << 8) | cp[2])); \
+ cp += 3; \
+ } else { \
+ (f) = htonl(ntohl(f) + (u_int32_t)*cp++); \
+ } \
+}
+
+#define DECODES(f) { \
+ if (*cp == 0) {\
+ (f) = htons(ntohs(f) + ((cp[1] << 8) | cp[2])); \
+ cp += 3; \
+ } else { \
+ (f) = htons(ntohs(f) + (u_int32_t)*cp++); \
+ } \
+}
+
+#define DECODEU(f) { \
+ if (*cp == 0) {\
+ (f) = htons((cp[1] << 8) | cp[2]); \
+ cp += 3; \
+ } else { \
+ (f) = htons((u_int32_t)*cp++); \
+ } \
+}
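+
+/*
+ * Illustrative sketch (example only): the variable-length delta coding the
+ * macros above implement.  Deltas of 1-255 occupy one octet; 0 and 256-65535
+ * occupy three octets (0, MSB, LSB).  The helper name is hypothetical.
+ */
+#if 0
+static u_int
+encode_example(u_char *buf)
+{
+	register u_char *cp = buf;	/* ENCODE/ENCODEZ advance cp */
+
+	ENCODE(1);		/* small delta: one octet, 0x01 */
+	ENCODE(300);		/* large delta: three octets, 0x00 0x01 0x2c */
+	ENCODEZ(0);		/* zero must take the long form: 0x00 0x00 0x00 */
+	return (cp - buf);	/* 7 octets emitted */
+}
+#endif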
+
+/*
+ * Attempt to compress an outgoing TCP packet and return the type of
+ * the result. The caller must have already verified that the protocol
+ * is TCP. The first mbuf must contain the complete IP and TCP headers,
+ * and "ip" must be == mtod(m, struct ip *). "comp" supplies the
+ * compression state, and "compress_cid" tells us whether it is OK
+ * to leave out the CID field when feasible.
+ *
+ * The caller is responsible for adjusting m->m_pkthdr.len upon return,
+ * if m is an M_PKTHDR mbuf.
+ */
+u_int
+sl_compress_tcp(m, ip, comp, compress_cid)
+ struct mbuf *m;
+ register struct ip *ip;
+ struct slcompress *comp;
+ int compress_cid;
+{
+ register struct cstate *cs = comp->last_cs->cs_next;
+ register u_int hlen = ip->ip_hl;
+ register struct tcphdr *oth;
+ register struct tcphdr *th;
+ register u_int deltaS, deltaA;
+ register u_int changes = 0;
+ u_char new_seq[16];
+ register u_char *cp = new_seq;
+
+ /*
+ * Bail if this is an IP fragment or if the TCP packet isn't
+ * `compressible' (i.e., ACK isn't set or some other control bit is
+ * set). (We assume that the caller has already made sure the
+ * packet is IP proto TCP).
+ */
+ if ((ip->ip_off & htons(0x3fff)) || m->m_len < 40)
+ return (TYPE_IP);
+
+ th = (struct tcphdr *)&((int32_t *)ip)[hlen];
+ if ((th->th_flags & (TH_SYN|TH_FIN|TH_RST|TH_ACK)) != TH_ACK)
+ return (TYPE_IP);
+ /*
+ * Packet is compressible -- we're going to send either a
+ * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way we need
+ * to locate (or create) the connection state. Special case the
+ * most recently used connection since it's most likely to be used
+ * again & we don't have to do any reordering if it's used.
+ */
+ INCR(sls_packets)
+ if (ip->ip_src.s_addr != cs->cs_ip.ip_src.s_addr ||
+ ip->ip_dst.s_addr != cs->cs_ip.ip_dst.s_addr ||
+ *(int32_t *)th != ((int32_t *)&cs->cs_ip)[cs->cs_ip.ip_hl]) {
+ /*
+ * Wasn't the first -- search for it.
+ *
+ * States are kept in a circularly linked list with
+ * last_cs pointing to the end of the list. The
+ * list is kept in lru order by moving a state to the
+ * head of the list whenever it is referenced. Since
+ * the list is short and, empirically, the connection
+ * we want is almost always near the front, we locate
+ * states via linear search. If we don't find a state
+ * for the datagram, the oldest state is (re-)used.
+ */
+ register struct cstate *lcs;
+ register struct cstate *lastcs = comp->last_cs;
+
+ do {
+ lcs = cs; cs = cs->cs_next;
+ INCR(sls_searches)
+ if (ip->ip_src.s_addr == cs->cs_ip.ip_src.s_addr
+ && ip->ip_dst.s_addr == cs->cs_ip.ip_dst.s_addr
+ && *(int32_t *)th ==
+ ((int32_t *)&cs->cs_ip)[cs->cs_ip.ip_hl])
+ goto found;
+ } while (cs != lastcs);
+
+ /*
+ * Didn't find it -- re-use oldest cstate. Send an
+ * uncompressed packet that tells the other side what
+ * connection number we're using for this conversation.
+ * Note that since the state list is circular, the oldest
+ * state points to the newest and we only need to set
+ * last_cs to update the lru linkage.
+ */
+ INCR(sls_misses)
+ comp->last_cs = lcs;
+ hlen += th->th_off;
+ hlen <<= 2;
+ if (hlen > m->m_len)
+ return TYPE_IP;
+ goto uncompressed;
+
+ found:
+ /*
+ * Found it -- move to the front on the connection list.
+ */
+ if (cs == lastcs)
+ comp->last_cs = lcs;
+ else {
+ lcs->cs_next = cs->cs_next;
+ cs->cs_next = lastcs->cs_next;
+ lastcs->cs_next = cs;
+ }
+ }
+
+ /*
+ * Make sure that only what we expect to change changed. The first
+ * line of the `if' checks the IP protocol version, header length &
+ * type of service. The 2nd line checks the "Don't fragment" bit.
+ * The 3rd line checks the time-to-live and protocol (the protocol
+ * check is unnecessary but costless). The 4th line checks the TCP
+ * header length. The 5th line checks IP options, if any. The 6th
+ * line checks TCP options, if any. If any of these things are
+ * different between the previous & current datagram, we send the
+ * current datagram `uncompressed'.
+ */
+ oth = (struct tcphdr *)&((int32_t *)&cs->cs_ip)[hlen];
+ deltaS = hlen;
+ hlen += th->th_off;
+ hlen <<= 2;
+ if (hlen > m->m_len)
+ return TYPE_IP;
+
+ if (((u_int16_t *)ip)[0] != ((u_int16_t *)&cs->cs_ip)[0] ||
+ ((u_int16_t *)ip)[3] != ((u_int16_t *)&cs->cs_ip)[3] ||
+ ((u_int16_t *)ip)[4] != ((u_int16_t *)&cs->cs_ip)[4] ||
+ th->th_off != oth->th_off ||
+ (deltaS > 5 &&
+ BCMP(ip + 1, &cs->cs_ip + 1, (deltaS - 5) << 2)) ||
+ (th->th_off > 5 &&
+ BCMP(th + 1, oth + 1, (th->th_off - 5) << 2)))
+ goto uncompressed;
+
+ /*
+ * Figure out which of the changing fields changed. The
+ * receiver expects changes in the order: urgent, window,
+ * ack, seq (the order minimizes the number of temporaries
+ * needed in this section of code).
+ */
+ if (th->th_flags & TH_URG) {
+ deltaS = ntohs(th->th_urp);
+ ENCODEZ(deltaS);
+ changes |= NEW_U;
+ } else if (th->th_urp != oth->th_urp)
+ /* argh! URG not set but urp changed -- a sensible
+ * implementation should never do this but RFC793
+ * doesn't prohibit the change so we have to deal
+ * with it. */
+ goto uncompressed;
+
+ deltaS = (u_int16_t)(ntohs(th->th_win) - ntohs(oth->th_win));
+ if (deltaS) {
+ ENCODE(deltaS);
+ changes |= NEW_W;
+ }
+
+ deltaA = ntohl(th->th_ack) - ntohl(oth->th_ack);
+ if (deltaA) {
+ if (deltaA > 0xffff)
+ goto uncompressed;
+ ENCODE(deltaA);
+ changes |= NEW_A;
+ }
+
+ deltaS = ntohl(th->th_seq) - ntohl(oth->th_seq);
+ if (deltaS) {
+ if (deltaS > 0xffff)
+ goto uncompressed;
+ ENCODE(deltaS);
+ changes |= NEW_S;
+ }
+
+ switch(changes) {
+
+ case 0:
+ /*
+ * Nothing changed. If this packet contains data and the
+ * last one didn't, this is probably a data packet following
+ * an ack (normal on an interactive connection) and we send
+ * it compressed. Otherwise it's probably a retransmit,
+ * retransmitted ack or window probe. Send it uncompressed
+ * in case the other side missed the compressed version.
+ */
+ if (ip->ip_len != cs->cs_ip.ip_len &&
+ ntohs(cs->cs_ip.ip_len) == hlen)
+ break;
+
+ /* (fall through) */
+
+ case SPECIAL_I:
+ case SPECIAL_D:
+ /*
+ * actual changes match one of our special case encodings --
+ * send packet uncompressed.
+ */
+ goto uncompressed;
+
+ case NEW_S|NEW_A:
+ if (deltaS == deltaA &&
+ deltaS == ntohs(cs->cs_ip.ip_len) - hlen) {
+ /* special case for echoed terminal traffic */
+ changes = SPECIAL_I;
+ cp = new_seq;
+ }
+ break;
+
+ case NEW_S:
+ if (deltaS == ntohs(cs->cs_ip.ip_len) - hlen) {
+ /* special case for data xfer */
+ changes = SPECIAL_D;
+ cp = new_seq;
+ }
+ break;
+ }
+
+ deltaS = ntohs(ip->ip_id) - ntohs(cs->cs_ip.ip_id);
+ if (deltaS != 1) {
+ ENCODEZ(deltaS);
+ changes |= NEW_I;
+ }
+ if (th->th_flags & TH_PUSH)
+ changes |= TCP_PUSH_BIT;
+ /*
+ * Grab the cksum before we overwrite it below. Then update our
+ * state with this packet's header.
+ */
+ deltaA = ntohs(th->th_sum);
+ BCOPY(ip, &cs->cs_ip, hlen);
+
+ /*
+ * We want to use the original packet as our compressed packet.
+ * (cp - new_seq) is the number of bytes we need for compressed
+ * sequence numbers. In addition we need one byte for the change
+ * mask, one for the connection id and two for the tcp checksum.
+ * So, (cp - new_seq) + 4 bytes of header are needed. hlen is how
+ * many bytes of the original packet to toss so subtract the two to
+ * get the new packet size.
+ */
+ deltaS = cp - new_seq;
+ cp = (u_char *)ip;
+ if (compress_cid == 0 || comp->last_xmit != cs->cs_id) {
+ comp->last_xmit = cs->cs_id;
+ hlen -= deltaS + 4;
+ cp += hlen;
+ *cp++ = changes | NEW_C;
+ *cp++ = cs->cs_id;
+ } else {
+ hlen -= deltaS + 3;
+ cp += hlen;
+ *cp++ = changes;
+ }
+ m->m_len -= hlen;
+ m->m_data += hlen;
+ *cp++ = deltaA >> 8;
+ *cp++ = deltaA;
+ BCOPY(new_seq, cp, deltaS);
+ INCR(sls_compressed)
+ return (TYPE_COMPRESSED_TCP);
+
+ /*
+ * Update connection state cs & send uncompressed packet ('uncompressed'
+ * means a regular ip/tcp packet but with the 'conversation id' we hope
+ * to use on future compressed packets in the protocol field).
+ */
+uncompressed:
+ BCOPY(ip, &cs->cs_ip, hlen);
+ ip->ip_p = cs->cs_id;
+ comp->last_xmit = cs->cs_id;
+ return (TYPE_UNCOMPRESSED_TCP);
+}
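+
+/*
+ * Illustrative sketch (example only): roughly how a serial-line driver's
+ * output path might use sl_compress_tcp().  The function name is
+ * hypothetical; "comp" is the per-line state, and the returned type nibble
+ * is folded into the first byte of the (possibly shortened) packet.
+ */
+#if 0
+static void
+vj_output_example(struct mbuf *m, struct slcompress *comp)
+{
+	register struct ip *ip = mtod(m, struct ip *);
+
+	if (ip->ip_p == IPPROTO_TCP) {
+		/* last arg 1: OK to omit the connection id when unchanged */
+		*mtod(m, u_char *) |= sl_compress_tcp(m, ip, comp, 1);
+		/* A real driver would also fix up m->m_pkthdr.len here,
+		 * as noted in the comment above sl_compress_tcp(). */
+	}
+	/* ... frame and transmit m ... */
+}
+#endif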
+
+
+int
+sl_uncompress_tcp(bufp, len, type, comp)
+ u_char **bufp;
+ int len;
+ u_int type;
+ struct slcompress *comp;
+{
+ u_char *hdr, *cp;
+ int hlen, vjlen;
+
+ cp = bufp? *bufp: NULL;
+ vjlen = sl_uncompress_tcp_core(cp, len, len, type, comp, &hdr, &hlen);
+ if (vjlen < 0)
+ return (0); /* error */
+ if (vjlen == 0)
+ return (len); /* was uncompressed already */
+
+ cp += vjlen;
+ len -= vjlen;
+
+ /*
+ * At this point, cp points to the first byte of data in the
+ * packet. If we're not aligned on a 4-byte boundary, copy the
+ * data down so the ip & tcp headers will be aligned. Then back up
+ * cp by the tcp/ip header length to make room for the reconstructed
+ * header (we assume the packet we were handed has enough space to
+ * prepend 128 bytes of header).
+ */
+ if ((intptr_t)cp & 3) {
+ if (len > 0)
+ (void) ovbcopy(cp, (caddr_t)((intptr_t)cp &~ 3), len);
+ cp = (u_char *)((intptr_t)cp &~ 3);
+ }
+ cp -= hlen;
+ len += hlen;
+ BCOPY(hdr, cp, hlen);
+
+ *bufp = cp;
+ return (len);
+}
+
+/*
+ * Uncompress a packet of total length total_len. The first buflen
+ * bytes are at buf; this must include the entire (compressed or
+ * uncompressed) TCP/IP header. This procedure returns the length
+ * of the VJ header, with a pointer to the uncompressed IP header
+ * in *hdrp and its length in *hlenp.
+ */
+int
+sl_uncompress_tcp_core(buf, buflen, total_len, type, comp, hdrp, hlenp)
+ u_char *buf;
+ int buflen, total_len;
+ u_int type;
+ struct slcompress *comp;
+ u_char **hdrp;
+ u_int *hlenp;
+{
+ register u_char *cp;
+ register u_int hlen, changes;
+ register struct tcphdr *th;
+ register struct cstate *cs;
+ register struct ip *ip;
+ register u_int16_t *bp;
+ register u_int vjlen;
+
+ switch (type) {
+
+ case TYPE_UNCOMPRESSED_TCP:
+ ip = (struct ip *) buf;
+ if (ip->ip_p >= MAX_STATES)
+ goto bad;
+ cs = &comp->rstate[comp->last_recv = ip->ip_p];
+ comp->flags &=~ SLF_TOSS;
+ ip->ip_p = IPPROTO_TCP;
+ /*
+ * Calculate the size of the TCP/IP header and make sure that
+ * we don't overflow the space we have available for it.
+ */
+ hlen = ip->ip_hl << 2;
+ if (hlen + sizeof(struct tcphdr) > buflen)
+ goto bad;
+ hlen += ((struct tcphdr *)&((char *)ip)[hlen])->th_off << 2;
+ if (hlen > MAX_HDR || hlen > buflen)
+ goto bad;
+ BCOPY(ip, &cs->cs_ip, hlen);
+ cs->cs_hlen = hlen;
+ INCR(sls_uncompressedin)
+ *hdrp = (u_char *) &cs->cs_ip;
+ *hlenp = hlen;
+ return (0);
+
+ default:
+ goto bad;
+
+ case TYPE_COMPRESSED_TCP:
+ break;
+ }
+ /* We've got a compressed packet. */
+ INCR(sls_compressedin)
+ cp = buf;
+ changes = *cp++;
+ if (changes & NEW_C) {
+ /* Make sure the state index is in range, then grab the state.
+ * If we have a good state index, clear the 'discard' flag. */
+ if (*cp >= MAX_STATES)
+ goto bad;
+
+ comp->flags &=~ SLF_TOSS;
+ comp->last_recv = *cp++;
+ } else {
+ /* this packet has an implicit state index. If we've
+ * had a line error since the last time we got an
+ * explicit state index, we have to toss the packet. */
+ if (comp->flags & SLF_TOSS) {
+ INCR(sls_tossed)
+ return (-1);
+ }
+ }
+ cs = &comp->rstate[comp->last_recv];
+ hlen = cs->cs_ip.ip_hl << 2;
+ th = (struct tcphdr *)&((u_char *)&cs->cs_ip)[hlen];
+ th->th_sum = htons((*cp << 8) | cp[1]);
+ cp += 2;
+ if (changes & TCP_PUSH_BIT)
+ th->th_flags |= TH_PUSH;
+ else
+ th->th_flags &=~ TH_PUSH;
+
+ switch (changes & SPECIALS_MASK) {
+ case SPECIAL_I:
+ {
+ register u_int i = ntohs(cs->cs_ip.ip_len) - cs->cs_hlen;
+ th->th_ack = htonl(ntohl(th->th_ack) + i);
+ th->th_seq = htonl(ntohl(th->th_seq) + i);
+ }
+ break;
+
+ case SPECIAL_D:
+ th->th_seq = htonl(ntohl(th->th_seq) + ntohs(cs->cs_ip.ip_len)
+ - cs->cs_hlen);
+ break;
+
+ default:
+ if (changes & NEW_U) {
+ th->th_flags |= TH_URG;
+ DECODEU(th->th_urp)
+ } else
+ th->th_flags &=~ TH_URG;
+ if (changes & NEW_W)
+ DECODES(th->th_win)
+ if (changes & NEW_A)
+ DECODEL(th->th_ack)
+ if (changes & NEW_S)
+ DECODEL(th->th_seq)
+ break;
+ }
+ if (changes & NEW_I) {
+ DECODES(cs->cs_ip.ip_id)
+ } else
+ cs->cs_ip.ip_id = htons(ntohs(cs->cs_ip.ip_id) + 1);
+
+ /*
+ * At this point, cp points to the first byte of data in the
+ * packet. Fill in the IP total length and update the IP
+ * header checksum.
+ */
+ vjlen = cp - buf;
+ buflen -= vjlen;
+ if (buflen < 0)
+ /* we must have dropped some characters (crc should detect
+ * this but the old slip framing won't) */
+ goto bad;
+
+ total_len += cs->cs_hlen - vjlen;
+ cs->cs_ip.ip_len = htons(total_len);
+
+ /* recompute the ip header checksum */
+ bp = (u_int16_t *) &cs->cs_ip;
+ cs->cs_ip.ip_sum = 0;
+ for (changes = 0; hlen > 0; hlen -= 2)
+ changes += *bp++;
+ changes = (changes & 0xffff) + (changes >> 16);
+ changes = (changes & 0xffff) + (changes >> 16);
+ cs->cs_ip.ip_sum = ~ changes;
+
+ *hdrp = (u_char *) &cs->cs_ip;
+ *hlenp = cs->cs_hlen;
+ return vjlen;
+
+bad:
+ comp->flags |= SLF_TOSS;
+ INCR(sls_errorin)
+ return (-1);
+}
diff --git a/sys/net/slcompress.h b/sys/net/slcompress.h
new file mode 100644
index 0000000..ca7ed03
--- /dev/null
+++ b/sys/net/slcompress.h
@@ -0,0 +1,162 @@
+/*
+ * Definitions for tcp compression routines.
+ *
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ * $FreeBSD$
+ */
+
+#ifndef _NET_SLCOMPRESS_H_
+#define _NET_SLCOMPRESS_H_
+
+#define MAX_STATES 16 /* must be > 2 and < 256 */
+#define MAX_HDR MLEN /* XXX 4bsd-ism: should really be 128 */
+
+/*
+ * Compressed packet format:
+ *
+ * The first octet contains the packet type (top 3 bits), TCP
+ * 'push' bit, and flags that indicate which of the 4 TCP sequence
+ * numbers have changed (bottom 5 bits). The next octet is a
+ * conversation number that associates a saved IP/TCP header with
+ * the compressed packet. The next two octets are the TCP checksum
+ * from the original datagram. The next 0 to 15 octets are
+ * sequence number changes, one change per bit set in the header
+ * (there may be no changes and there are two special cases where
+ * the receiver implicitly knows what changed -- see below).
+ *
+ * There are 5 numbers which can change (they are always inserted
+ * in the following order): TCP urgent pointer, window,
+ * acknowledgement, sequence number and IP ID. (The urgent pointer
+ * is different from the others in that its value is sent, not the
+ * change in value.) Since typical use of SLIP links is biased
+ * toward small packets (see comments on MTU/MSS below), changes
+ * use a variable length coding with one octet for numbers in the
+ * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the
+ * range 256 - 65535 or 0. (If the change in sequence number or
+ * ack is more than 65535, an uncompressed packet is sent.)
+ */
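+
+/*
+ * Worked example (illustrative only): an ACK-only segment with no IP or TCP
+ * options, whose acknowledgement advanced by 1, whose IP ID advanced by the
+ * expected 1, and whose connection id is unchanged compresses to just four
+ * octets:
+ *
+ *	changes octet	NEW_A (0x04)
+ *	TCP checksum	two octets copied from the original segment
+ *	ack delta	one octet, value 1
+ *
+ * i.e. 40 octets of IP/TCP header collapse to 4.  Had the connection id
+ * changed, NEW_C would also be set and a fifth octet carrying the id
+ * would follow the changes octet.
+ */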
+
+/*
+ * Packet types (must not conflict with IP protocol version)
+ *
+ * The top nibble of the first octet is the packet type. There are
+ * three possible types: IP (not proto TCP or tcp with one of the
+ * control flags set); uncompressed TCP (a normal IP/TCP packet but
+ * with the 8-bit protocol field replaced by an 8-bit connection id --
+ * this type of packet syncs the sender & receiver); and compressed
+ * TCP (described above).
+ *
+ * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and
+ * is logically part of the 4-bit "changes" field that follows. Top
+ * three bits are actual packet type. For backward compatibility
+ * and in the interest of conserving bits, numbers are chosen so the
+ * IP protocol version number (4) which normally appears in this nibble
+ * means "IP packet".
+ */
+
+/* packet types */
+#define TYPE_IP 0x40
+#define TYPE_UNCOMPRESSED_TCP 0x70
+#define TYPE_COMPRESSED_TCP 0x80
+#define TYPE_ERROR 0x00
+
+/* Bits in first octet of compressed packet */
+#define NEW_C 0x40 /* flag bits for what changed in a packet */
+#define NEW_I 0x20
+#define NEW_S 0x08
+#define NEW_A 0x04
+#define NEW_W 0x02
+#define NEW_U 0x01
+
+/* reserved, special-case values of above */
+#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */
+#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */
+#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U)
+
+#define TCP_PUSH_BIT 0x10
+
+
+/*
+ * "state" data for each active tcp conversation on the wire. This is
+ * basically a copy of the entire IP/TCP header from the last packet
+ * we saw from the conversation together with a small identifier
+ * the transmit & receive ends of the line use to locate the saved header.
+ */
+struct cstate {
+ struct cstate *cs_next; /* next most recently used cstate (xmit only) */
+ u_int16_t cs_hlen; /* size of hdr (receive only) */
+ u_char cs_id; /* connection # associated with this state */
+ u_char cs_filler;
+ union {
+ char csu_hdr[MAX_HDR];
+ struct ip csu_ip; /* ip/tcp hdr from most recent packet */
+ } slcs_u;
+};
+#define cs_ip slcs_u.csu_ip
+#define cs_hdr slcs_u.csu_hdr
+
+/*
+ * all the state data for one serial line (we need one of these
+ * per line).
+ */
+struct slcompress {
+ struct cstate *last_cs; /* most recently used tstate */
+ u_char last_recv; /* last rcvd conn. id */
+ u_char last_xmit; /* last sent conn. id */
+ u_int16_t flags;
+#ifndef SL_NO_STATS
+ int sls_packets; /* outbound packets */
+ int sls_compressed; /* outbound compressed packets */
+ int sls_searches; /* searches for connection state */
+ int sls_misses; /* times couldn't find conn. state */
+ int sls_uncompressedin; /* inbound uncompressed packets */
+ int sls_compressedin; /* inbound compressed packets */
+ int sls_errorin; /* inbound unknown type packets */
+ int sls_tossed; /* inbound packets tossed because of error */
+#endif
+ struct cstate tstate[MAX_STATES]; /* xmit connection states */
+ struct cstate rstate[MAX_STATES]; /* receive connection states */
+};
+/* flag values */
+#define SLF_TOSS 1 /* tossing rcvd frames because of input err */
+
+void sl_compress_init __P((struct slcompress *, int));
+u_int sl_compress_tcp __P((struct mbuf *,
+ struct ip *, struct slcompress *, int));
+int sl_uncompress_tcp __P((u_char **, int, u_int, struct slcompress *));
+int sl_uncompress_tcp_core __P((u_char *, int, int, u_int,
+ struct slcompress *, u_char **, u_int *));
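+
+/*
+ * Illustrative sketch (example only): setting up the per-line state.  One
+ * struct slcompress is needed per serial line; the names below are
+ * hypothetical.
+ */
+#if 0
+static struct slcompress vj_example_state;	/* one per serial line */
+
+static void
+vj_init_example(void)
+{
+	/* -1: zero everything (including stats) and use all MAX_STATES slots */
+	sl_compress_init(&vj_example_state, -1);
+}
+#endif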
+
+#endif /* !_NET_SLCOMPRESS_H_ */
diff --git a/sys/net/slip.h b/sys/net/slip.h
new file mode 100644
index 0000000..917ebf0
--- /dev/null
+++ b/sys/net/slip.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)slip.h 8.1 (Berkeley) 2/12/94
+ * $FreeBSD$
+ */
+
+#ifndef _NET_SLIP_H_
+#define _NET_SLIP_H_
+
+/* Ioctls operating on SLIP ttys. */
+#define SLIOCGUNIT _IOR('t', 88, int) /* get slip unit number */
+#define SLIOCSKEEPAL _IOW('t', 84, int) /* set keepalive */
+#define SLIOCSOUTFILL _IOW('t', 83, int) /* set out fill time */
+#define SLIOCGKEEPAL _IOR('t', 82, int) /* get keepalive time */
+#define SLIOCGOUTFILL _IOR('t', 81, int) /* get out fill time */
+#define SLIOCSUNIT _IOW('t', 80, int) /* set slip unit number */
+
+/*
+ * Definitions of the pseudo-link-level header attached to slip
+ * packets grabbed by the packet filter (bpf) traffic monitor.
+ */
+#define SLIP_HDRLEN 16 /* BPF SLIP header length */
+
+/* Offsets into BPF SLIP header. */
+#define SLX_DIR 0 /* direction; see below */
+#define SLX_CHDR 1 /* compressed header data */
+#define CHDR_LEN 15 /* length of compressed header data */
+
+#define SLIPDIR_IN 0 /* incoming */
+#define SLIPDIR_OUT 1 /* outgoing */
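+
+/*
+ * Illustrative sketch (example only, userland): interpreting one frame
+ * captured with bpf(4) on a SLIP interface.  The first SLIP_HDRLEN bytes
+ * are the pseudo link-level header described above; the IP datagram as
+ * handed to or from the network layer follows it, while SLX_CHDR preserves
+ * the first CHDR_LEN bytes of header data as carried on the line (which
+ * may be VJ-compressed).  The helper name is hypothetical.
+ */
+#if 0
+#include <sys/types.h>
+#include <net/slip.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+
+static const struct ip *
+slip_bpf_example(const u_char *frame)
+{
+	if (frame[SLX_DIR] == SLIPDIR_OUT) {
+		/* frame + SLX_CHDR holds CHDR_LEN bytes of on-the-wire
+		 * header data for this outgoing packet. */
+	}
+	return ((const struct ip *)(frame + SLIP_HDRLEN));
+}
+#endif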
+
+#endif /* !_NET_SLIP_H_ */
diff --git a/sys/net/zlib.c b/sys/net/zlib.c
new file mode 100644
index 0000000..64c4ad0
--- /dev/null
+++ b/sys/net/zlib.c
@@ -0,0 +1,5379 @@
+/*
+ * This file is derived from various .h and .c files from the zlib-1.0.4
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets. See zlib.h for conditions of
+ * distribution and use.
+ *
+ * Changes that have been made include:
+ * - added Z_PACKET_FLUSH (see zlib.h for details)
+ * - added inflateIncomp and deflateOutputPending
+ * - allow strm->next_out to be NULL, meaning discard the output
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * ==FILEVERSION 971210==
+ *
+ * This marker is used by the Linux installation script to determine
+ * whether an up-to-date version of this file is already installed.
+ */
+
+#define NO_DUMMY_DECL
+#define NO_ZCFUNCS
+#define MY_ZCALLOC
+
+#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL))
+#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
+#endif
+
+
+/* +++ zutil.h */
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */
+
+#ifndef _Z_UTIL_H
+#define _Z_UTIL_H
+
+#ifdef KERNEL
+#include <net/zlib.h>
+#else
+#include "zlib.h"
+#endif
+
+#if defined(KERNEL) || defined(_KERNEL)
+/* Assume this is a *BSD or SVR4 kernel */
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/systm.h>
+# define HAVE_MEMCPY
+# define memcpy(d, s, n) bcopy((s), (d), (n))
+# define memset(d, v, n) bzero((d), (n))
+# define memcmp bcmp
+
+#else
+#if defined(__KERNEL__)
+/* Assume this is a Linux kernel */
+#include <linux/string.h>
+#define HAVE_MEMCPY
+
+#else /* not kernel */
+
+#if defined(MSDOS)||defined(VMS)||defined(CRAY)||defined(WIN32)||defined(RISCOS)
+# include <stddef.h>
+# include <errno.h>
+#else
+ extern int errno;
+#endif
+#ifdef STDC
+# include <string.h>
+# include <stdlib.h>
+#endif
+#endif /* __KERNEL__ */
+#endif /* _KERNEL || KERNEL */
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */
+/* (size given to avoid silly warnings with Visual C++) */
+
+#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
+
+#define ERR_RETURN(strm,err) \
+ return (strm->msg = (const char*)ERR_MSG(err), (err))
+/* To be used only when the state is known to be valid */
+
+ /* common constants */
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+#ifdef MSDOS
+# define OS_CODE 0x00
+# ifdef __TURBOC__
+# include <alloc.h>
+# else /* MSC or DJGPP */
+# include <malloc.h>
+# endif
+#endif
+
+#ifdef OS2
+# define OS_CODE 0x06
+#endif
+
+#ifdef WIN32 /* Windows 95 & Windows NT */
+# define OS_CODE 0x0b
+#endif
+
+#if defined(VAXC) || defined(VMS)
+# define OS_CODE 0x02
+# define FOPEN(name, mode) \
+ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
+#endif
+
+#ifdef AMIGA
+# define OS_CODE 0x01
+#endif
+
+#if defined(ATARI) || defined(atarist)
+# define OS_CODE 0x05
+#endif
+
+#ifdef MACOS
+# define OS_CODE 0x07
+#endif
+
+#ifdef __50SERIES /* Prime/PRIMOS */
+# define OS_CODE 0x0F
+#endif
+
+#ifdef TOPS20
+# define OS_CODE 0x0a
+#endif
+
+#if defined(_BEOS_) || defined(RISCOS)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+#endif
+
+ /* Common defaults */
+
+#ifndef OS_CODE
+# define OS_CODE 0x03 /* assume Unix */
+#endif
+
+#ifndef FOPEN
+# define FOPEN(name, mode) fopen((name), (mode))
+#endif
+
+ /* functions */
+
+#ifdef HAVE_STRERROR
+ extern char *strerror OF((int));
+# define zstrerror(errnum) strerror(errnum)
+#else
+# define zstrerror(errnum) ""
+#endif
+
+#if defined(pyr)
+# define NO_MEMCPY
+#endif
+#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER)
+ /* Use our own functions for small and medium model with MSC <= 5.0.
+ * You may have to use the same strategy for Borland C (untested).
+ */
+# define NO_MEMCPY
+#endif
+#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
+# define HAVE_MEMCPY
+#endif
+#ifdef HAVE_MEMCPY
+# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
+# define zmemcpy _fmemcpy
+# define zmemcmp _fmemcmp
+# define zmemzero(dest, len) _fmemset(dest, 0, len)
+# else
+# define zmemcpy memcpy
+# define zmemcmp memcmp
+# define zmemzero(dest, len) memset(dest, 0, len)
+# endif
+#else
+ extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len));
+ extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len));
+ extern void zmemzero OF((Bytef* dest, uInt len));
+#endif
+
+/* Diagnostic functions */
+#ifdef DEBUG_ZLIB
+# include <stdio.h>
+# ifndef verbose
+# define verbose 0
+# endif
+ extern void z_error OF((char *m));
+# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+
+typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len));
+
+voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
+void zcfree OF((voidpf opaque, voidpf ptr));
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
+#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
+
+#endif /* _Z_UTIL_H */
+/* --- zutil.h */
+
+/* +++ deflate.h */
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995-1996 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */
+
+#ifndef _DEFLATE_H
+#define _DEFLATE_H
+
+/* #include "zutil.h" */
+
+/* ===========================================================================
+ * Internal compression state.
+ */
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS 256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES 30
+/* number of distance codes */
+
+#define BL_CODES 19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define INIT_STATE 42
+#define BUSY_STATE 113
+#define FINISH_STATE 666
+/* Stream status */
+
+
+/* Data structure describing a single value and its code string. */
+typedef struct ct_data_s {
+ union {
+ ush freq; /* frequency count */
+ ush code; /* bit string */
+ } fc;
+ union {
+ ush dad; /* father node in Huffman tree */
+ ush len; /* length of bit string */
+ } dl;
+} FAR ct_data;
+
+#define Freq fc.freq
+#define Code fc.code
+#define Dad dl.dad
+#define Len dl.len
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+typedef struct tree_desc_s {
+ ct_data *dyn_tree; /* the dynamic tree */
+ int max_code; /* largest code with non zero frequency */
+ static_tree_desc *stat_desc; /* the corresponding static tree */
+} FAR tree_desc;
+
+typedef ush Pos;
+typedef Pos FAR Posf;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
+typedef struct deflate_state {
+ z_streamp strm; /* pointer back to this zlib stream */
+ int status; /* as the name implies */
+ Bytef *pending_buf; /* output still pending */
+ ulg pending_buf_size; /* size of pending_buf */
+ Bytef *pending_out; /* next pending byte to output to the stream */
+ int pending; /* nb of bytes in the pending buffer */
+ int noheader; /* suppress zlib header and adler32 */
+ Byte data_type; /* UNKNOWN, BINARY or ASCII */
+ Byte method; /* STORED (for zip only) or DEFLATED */
+ int last_flush; /* value of flush param for previous deflate call */
+
+ /* used by deflate.c: */
+
+ uInt w_size; /* LZ77 window size (32K by default) */
+ uInt w_bits; /* log2(w_size) (8..16) */
+ uInt w_mask; /* w_size - 1 */
+
+ Bytef *window;
+ /* Sliding window. Input bytes are read into the second half of the window,
+ * and move to the first half later to keep a dictionary of at least wSize
+ * bytes. With this organization, matches are limited to a distance of
+ * wSize-MAX_MATCH bytes, but this ensures that IO is always
+ * performed with a length multiple of the block size. Also, it limits
+ * the window size to 64K, which is quite useful on MSDOS.
+ * To do: use the user input buffer as sliding window.
+ */
+
+ ulg window_size;
+ /* Actual size of window: 2*wSize, except when the user input buffer
+ * is directly used as sliding window.
+ */
+
+ Posf *prev;
+ /* Link to older string with same hash index. To limit the size of this
+ * array to 64K, this link is maintained only for the last 32K strings.
+ * An index in this array is thus a window index modulo 32K.
+ */
+
+ Posf *head; /* Heads of the hash chains or NIL. */
+
+ uInt ins_h; /* hash index of string to be inserted */
+ uInt hash_size; /* number of elements in hash table */
+ uInt hash_bits; /* log2(hash_size) */
+ uInt hash_mask; /* hash_size-1 */
+
+ uInt hash_shift;
+ /* Number of bits by which ins_h must be shifted at each input
+ * step. It must be such that after MIN_MATCH steps, the oldest
+ * byte no longer takes part in the hash key, that is:
+ * hash_shift * MIN_MATCH >= hash_bits
+ */
+
+ long block_start;
+ /* Window position at the beginning of the current output block. Gets
+ * negative when the window is moved backwards.
+ */
+
+ uInt match_length; /* length of best match */
+ IPos prev_match; /* previous match */
+ int match_available; /* set if previous match exists */
+ uInt strstart; /* start of string to insert */
+ uInt match_start; /* start of matching string */
+ uInt lookahead; /* number of valid bytes ahead in window */
+
+ uInt prev_length;
+ /* Length of the best match at previous step. Matches not greater than this
+ * are discarded. This is used in the lazy match evaluation.
+ */
+
+ uInt max_chain_length;
+ /* To speed up deflation, hash chains are never searched beyond this
+ * length. A higher limit improves compression ratio but degrades the
+ * speed.
+ */
+
+ uInt max_lazy_match;
+ /* Attempt to find a better match only when the current match is strictly
+ * smaller than this value. This mechanism is used only for compression
+ * levels >= 4.
+ */
+# define max_insert_length max_lazy_match
+ /* Insert new strings in the hash table only if the match length is not
+ * greater than this length. This saves time but degrades compression.
+ * max_insert_length is used only for compression levels <= 3.
+ */
+
+ int level; /* compression level (1..9) */
+ int strategy; /* favor or force Huffman coding*/
+
+ uInt good_match;
+ /* Use a faster search when the previous match is longer than this */
+
+ int nice_match; /* Stop searching when current match exceeds this */
+
+ /* used by trees.c: */
+ /* Didn't use ct_data typedef below to suppress compiler warning */
+ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
+ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
+
+ struct tree_desc_s l_desc; /* desc. for literal tree */
+ struct tree_desc_s d_desc; /* desc. for distance tree */
+ struct tree_desc_s bl_desc; /* desc. for bit length tree */
+
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
+ int heap_len; /* number of elements in the heap */
+ int heap_max; /* element of largest frequency */
+ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ * The same heap array is used to build all trees.
+ */
+
+ uch depth[2*L_CODES+1];
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+ uchf *l_buf; /* buffer for literals or lengths */
+
+ uInt lit_bufsize;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+ * limiting lit_bufsize to 64K:
+ * - frequencies can be kept in 16 bit counters
+ * - if compression is not successful for the first block, all input
+ * data is still in the window so we can still emit a stored block even
+ * when input comes from standard input. (This can also be done for
+ * all blocks if lit_bufsize is not greater than 32K.)
+ * - if compression is not successful for a file smaller than 64K, we can
+ * even emit a stored file instead of a stored block (saving 5 bytes).
+ * This is applicable only for zip (not gzip or zlib).
+ * - creating new Huffman trees less frequently may not provide fast
+ * adaptation to changes in the input data statistics. (Take for
+ * example a binary file with poorly compressible code followed by
+ * a highly compressible string table.) Smaller buffer sizes give
+ * fast adaptation but have of course the overhead of transmitting
+ * trees more frequently.
+ * - I can't count above 4
+ */
+
+ uInt last_lit; /* running index in l_buf */
+
+ ushf *d_buf;
+ /* Buffer for distances. To simplify the code, d_buf and l_buf have
+ * the same number of elements. To use different lengths, an extra flag
+ * array would be necessary.
+ */
+
+ ulg opt_len; /* bit length of current block with optimal trees */
+ ulg static_len; /* bit length of current block with static trees */
+ ulg compressed_len; /* total bit length of compressed file */
+ uInt matches; /* number of string matches in current block */
+ int last_eob_len; /* bit length of EOB code for last block */
+
+#ifdef DEBUG_ZLIB
+ ulg bits_sent; /* bit length of the compressed data */
+#endif
+
+ ush bi_buf;
+ /* Output buffer. bits are inserted starting at the bottom (least
+ * significant bits).
+ */
+ int bi_valid;
+ /* Number of valid bits in bi_buf. All bits above the last valid bit
+ * are always zero.
+ */
+
+} FAR deflate_state;
+
+/* Output a byte on the stream.
+ * IN assertion: there is enough room in pending_buf.
+ */
+#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
+
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
+/* In order to simplify the code, particularly on 16 bit machines, match
+ * distances are limited to MAX_DIST instead of WSIZE.
+ */
+
+ /* in trees.c */
+void _tr_init OF((deflate_state *s));
+int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
+ulg _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+void _tr_align OF((deflate_state *s));
+void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+void _tr_stored_type_only OF((deflate_state *));
+
+#endif
+/* --- deflate.h */
+
+/* +++ deflate.c */
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired from that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in ftp://ds.internic.net/rfc/rfc1951.txt
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ * Data Compression with Finite Windows, Comm. ACM, 32,4 (1989) 490-505
+ *
+ */
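+
+/*
+ * Editorial illustration (not part of the original zlib sources): the lazy
+ * evaluation mentioned above works position by position. If the best match
+ * starting at position p has length 3, deflate_slow() does not emit it at
+ * once; it first looks at p+1. If a strictly longer match starts there, the
+ * byte at p is emitted as a single literal and evaluation continues from
+ * p+1 with the longer match as the new candidate; otherwise the deferred
+ * match at p is emitted.
+ */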
+
+/* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */
+
+/* #include "deflate.h" */
+
+char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+/* Compression function. Returns the block state after the call. */
+
+local void fill_window OF((deflate_state *s));
+local block_state deflate_stored OF((deflate_state *s, int flush));
+local block_state deflate_fast OF((deflate_state *s, int flush));
+local block_state deflate_slow OF((deflate_state *s, int flush));
+local void lm_init OF((deflate_state *s));
+local void putShortMSB OF((deflate_state *s, uInt b));
+local void flush_pending OF((z_streamp strm));
+local int read_buf OF((z_streamp strm, charf *buf, unsigned size));
+#ifdef ASMV
+ void match_init OF((void)); /* asm code initialization */
+ uInt longest_match OF((deflate_state *s, IPos cur_match));
+#else
+local uInt longest_match OF((deflate_state *s, IPos cur_match));
+#endif
+
+#ifdef DEBUG_ZLIB
+local void check_match OF((deflate_state *s, IPos start, IPos match,
+ int length));
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+# define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+} config;
+
+local config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
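+
+/*
+ * Editorial reading of the table (not part of the original zlib sources),
+ * using the default level 6 entry {8, 16, 128, 128, deflate_slow} as an
+ * example: stop the lazy search once the current match is 16 or longer,
+ * cut the chain search to a quarter once a match of at least 8 is already
+ * in hand, stop searching altogether at length 128, and never follow more
+ * than 128 hash-chain links per position.
+ */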
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+#ifndef NO_DUMMY_DECL
+struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
+#endif
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
+ * input characters, so that a running hash key can be computed from the
+ * previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
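+
+/*
+ * Editorial worked example (not part of the original zlib sources): with
+ * the default memLevel of 8, hash_bits == 15, hash_mask == 0x7fff and
+ * hash_shift == (15+3-1)/3 == 5, so the hash key covering bytes b0,b1,b2 is
+ *
+ *     h = ((((b0 << 5) ^ b1) << 5) ^ b2) & 0x7fff;
+ *
+ * One more UPDATE_HASH step (the third since b0 was hashed in) shifts the
+ * last of b0's bits past bit 14 and the mask discards them, so the oldest
+ * byte drops out after MIN_MATCH steps -- exactly the
+ * hash_shift * MIN_MATCH >= hash_bits property described above.
+ */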
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
+ * input characters and the first MIN_MATCH bytes of str are valid
+ * (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+ s->head[s->hash_size-1] = NIL; \
+ zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ========================================================================= */
+int deflateInit_(strm, level, version, stream_size)
+ z_streamp strm;
+ int level;
+ const char *version;
+ int stream_size;
+{
+ return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
+ Z_DEFAULT_STRATEGY, version, stream_size);
+ /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ version, stream_size)
+ z_streamp strm;
+ int level;
+ int method;
+ int windowBits;
+ int memLevel;
+ int strategy;
+ const char *version;
+ int stream_size;
+{
+ deflate_state *s;
+ int noheader = 0;
+ static char* my_version = ZLIB_VERSION;
+
+ ushf *overlay;
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
+ * output size for (length,distance) codes is <= 24 bits.
+ */
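+
+/*
+ * Editorial sketch of that overlay (not part of the original zlib sources),
+ * assuming the default memLevel of 8 (lit_bufsize == 16K) and a 16-bit ush:
+ * pending_buf is 4*lit_bufsize == 64K bytes; d_buf (16K ush distance
+ * entries) occupies bytes 16K..48K of it and l_buf (16K literal/length
+ * bytes) occupies bytes 48K..64K, matching the d_buf/l_buf assignments
+ * made further down in this function.
+ */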
+
+ if (version == Z_NULL || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return Z_VERSION_ERROR;
+ }
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->msg = Z_NULL;
+#ifndef NO_ZCFUNCS
+ if (strm->zalloc == Z_NULL) {
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+ }
+ if (strm->zfree == Z_NULL) strm->zfree = zcfree;
+#endif
+
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+
+ if (windowBits < 0) { /* undocumented feature: suppress zlib header */
+ noheader = 1;
+ windowBits = -windowBits;
+ }
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+ windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
+ if (s == Z_NULL) return Z_MEM_ERROR;
+ strm->state = (struct internal_state FAR *)s;
+ s->strm = strm;
+
+ s->noheader = noheader;
+ s->w_bits = windowBits;
+ s->w_size = 1 << s->w_bits;
+ s->w_mask = s->w_size - 1;
+
+ s->hash_bits = memLevel + 7;
+ s->hash_size = 1 << s->hash_bits;
+ s->hash_mask = s->hash_size - 1;
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+ s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
+ s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
+ s->pending_buf = (uchf *) overlay;
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
+ s->pending_buf == Z_NULL) {
+ strm->msg = (const char*)ERR_MSG(Z_MEM_ERROR);
+ deflateEnd (strm);
+ return Z_MEM_ERROR;
+ }
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+ s->level = level;
+ s->strategy = strategy;
+ s->method = (Byte)method;
+
+ return deflateReset(strm);
+}
+
+/* ========================================================================= */
+int deflateSetDictionary (strm, dictionary, dictLength)
+ z_streamp strm;
+ const Bytef *dictionary;
+ uInt dictLength;
+{
+ deflate_state *s;
+ uInt length = dictLength;
+ uInt n;
+ IPos hash_head = 0;
+
+ if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
+ return Z_STREAM_ERROR;
+
+ s = (deflate_state *) strm->state;
+ if (s->status != INIT_STATE) return Z_STREAM_ERROR;
+
+ strm->adler = adler32(strm->adler, dictionary, dictLength);
+
+ if (length < MIN_MATCH) return Z_OK;
+ if (length > MAX_DIST(s)) {
+ length = MAX_DIST(s);
+#ifndef USE_DICT_HEAD
+ dictionary += dictLength - length; /* use the tail of the dictionary */
+#endif
+ }
+ zmemcpy((charf *)s->window, dictionary, length);
+ s->strstart = length;
+ s->block_start = (long)length;
+
+ /* Insert all strings in the hash table (except for the last two bytes).
+ * s->lookahead stays null, so s->ins_h will be recomputed at the next
+ * call of fill_window.
+ */
+ s->ins_h = s->window[0];
+ UPDATE_HASH(s, s->ins_h, s->window[1]);
+ for (n = 0; n <= length - MIN_MATCH; n++) {
+ INSERT_STRING(s, n, hash_head);
+ }
+ if (hash_head) hash_head = 0; /* to make compiler happy */
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int deflateReset (strm)
+ z_streamp strm;
+{
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->total_in = strm->total_out = 0;
+ strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
+ strm->data_type = Z_UNKNOWN;
+
+ s = (deflate_state *)strm->state;
+ s->pending = 0;
+ s->pending_out = s->pending_buf;
+
+ if (s->noheader < 0) {
+ s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
+ }
+ s->status = s->noheader ? BUSY_STATE : INIT_STATE;
+ strm->adler = 1;
+ s->last_flush = Z_NO_FLUSH;
+
+ _tr_init(s);
+ lm_init(s);
+
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int deflateParams(strm, level, strategy)
+ z_streamp strm;
+ int level;
+ int strategy;
+{
+ deflate_state *s;
+ compress_func func;
+ int err = Z_OK;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = (deflate_state *) strm->state;
+
+ if (level == Z_DEFAULT_COMPRESSION) {
+ level = 6;
+ }
+ if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ func = configuration_table[s->level].func;
+
+ if (func != configuration_table[level].func && strm->total_in != 0) {
+ /* Flush the last buffer: */
+ err = deflate(strm, Z_PARTIAL_FLUSH);
+ }
+ if (s->level != level) {
+ s->level = level;
+ s->max_lazy_match = configuration_table[level].max_lazy;
+ s->good_match = configuration_table[level].good_length;
+ s->nice_match = configuration_table[level].nice_length;
+ s->max_chain_length = configuration_table[level].max_chain;
+ }
+ s->strategy = strategy;
+ return err;
+}
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+local void putShortMSB (s, b)
+ deflate_state *s;
+ uInt b;
+{
+ put_byte(s, (Byte)(b >> 8));
+ put_byte(s, (Byte)(b & 0xff));
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+local void flush_pending(strm)
+ z_streamp strm;
+{
+ deflate_state *s = (deflate_state *) strm->state;
+ unsigned len = s->pending;
+
+ if (len > strm->avail_out) len = strm->avail_out;
+ if (len == 0) return;
+
+ if (strm->next_out != Z_NULL) {
+ zmemcpy(strm->next_out, s->pending_out, len);
+ strm->next_out += len;
+ }
+ s->pending_out += len;
+ strm->total_out += len;
+ strm->avail_out -= len;
+ s->pending -= len;
+ if (s->pending == 0) {
+ s->pending_out = s->pending_buf;
+ }
+}
+
+/* ========================================================================= */
+int deflate (strm, flush)
+ z_streamp strm;
+ int flush;
+{
+ int old_flush; /* value of flush param for previous deflate call */
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ flush > Z_FINISH || flush < 0) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) strm->state;
+
+ if ((strm->next_in == Z_NULL && strm->avail_in != 0) ||
+ (s->status == FINISH_STATE && flush != Z_FINISH)) {
+ ERR_RETURN(strm, Z_STREAM_ERROR);
+ }
+ if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
+
+ s->strm = strm; /* just in case */
+ old_flush = s->last_flush;
+ s->last_flush = flush;
+
+ /* Write the zlib header */
+ if (s->status == INIT_STATE) {
+
+ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt level_flags = (s->level-1) >> 1;
+
+ if (level_flags > 3) level_flags = 3;
+ header |= (level_flags << 6);
+ if (s->strstart != 0) header |= PRESET_DICT;
+ header += 31 - (header % 31);
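+ /*
+ * Editorial worked example (not part of the original zlib sources):
+ * with the defaults w_bits == 15, level == 6 and no preset dictionary,
+ * header starts as 0x7880; 0x7880 % 31 == 3, so 28 is added and the
+ * emitted header bytes become 0x78 0x9c, a 16-bit value divisible by
+ * 31 as RFC 1950 requires.
+ */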
+
+ s->status = BUSY_STATE;
+ putShortMSB(s, header);
+
+ /* Save the adler32 of the preset dictionary: */
+ if (s->strstart != 0) {
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ strm->adler = 1L;
+ }
+
+ /* Flush as much pending output as possible */
+ if (s->pending != 0) {
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ /* Since avail_out is 0, deflate will be called again with
+ * more output space, but possibly with both pending and
+ * avail_in equal to zero. There won't be anything to do,
+ * but this is not an error situation so make sure we
+ * return OK instead of BUF_ERROR at next call of deflate:
+ */
+ s->last_flush = -1;
+ return Z_OK;
+ }
+
+ /* Make sure there is something to do and avoid duplicate consecutive
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
+ * returning Z_STREAM_END instead of Z_BUF_ERROR.
+ */
+ } else if (strm->avail_in == 0 && flush <= old_flush &&
+ flush != Z_FINISH) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* User must not provide more input after the first FINISH: */
+ if (s->status == FINISH_STATE && strm->avail_in != 0) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* Start a new block or continue the current one.
+ */
+ if (strm->avail_in != 0 || s->lookahead != 0 ||
+ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+ block_state bstate;
+
+ bstate = (*(configuration_table[s->level].func))(s, flush);
+
+ if (bstate == finish_started || bstate == finish_done) {
+ s->status = FINISH_STATE;
+ }
+ if (bstate == need_more || bstate == finish_started) {
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+ }
+ return Z_OK;
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ * of deflate should use the same flush parameter to make sure
+ * that the flush is complete. So we don't have to output an
+ * empty block here, this will be done at next call. This also
+ * ensures that for a very small output buffer, we emit at most
+ * one empty block.
+ */
+ }
+ if (bstate == block_done) {
+ if (flush == Z_PARTIAL_FLUSH) {
+ _tr_align(s);
+ } else if (flush == Z_PACKET_FLUSH) {
+ /* Output just the 3-bit `stored' block type value,
+ but not a zero length. */
+ _tr_stored_type_only(s);
+ } else { /* FULL_FLUSH or SYNC_FLUSH */
+ _tr_stored_block(s, (char*)0, 0L, 0);
+ /* For a full flush, this empty block will be recognized
+ * as a special marker by inflate_sync().
+ */
+ if (flush == Z_FULL_FLUSH) {
+ CLEAR_HASH(s); /* forget history */
+ }
+ }
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+ return Z_OK;
+ }
+ }
+ }
+ Assert(strm->avail_out > 0, "bug2");
+
+ if (flush != Z_FINISH) return Z_OK;
+ if (s->noheader) return Z_STREAM_END;
+
+ /* Write the zlib trailer (adler32) */
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ flush_pending(strm);
+ /* If avail_out is zero, the application will call deflate again
+ * to flush the rest.
+ */
+ s->noheader = -1; /* write the trailer only once! */
+ return s->pending != 0 ? Z_OK : Z_STREAM_END;
+}
+
+/* ========================================================================= */
+int deflateEnd (strm)
+ z_streamp strm;
+{
+ int status;
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = (deflate_state *) strm->state;
+
+ status = s->status;
+ if (status != INIT_STATE && status != BUSY_STATE &&
+ status != FINISH_STATE) {
+ return Z_STREAM_ERROR;
+ }
+
+ /* Deallocate in reverse order of allocations: */
+ TRY_FREE(strm, s->pending_buf);
+ TRY_FREE(strm, s->head);
+ TRY_FREE(strm, s->prev);
+ TRY_FREE(strm, s->window);
+
+ ZFREE(strm, s);
+ strm->state = Z_NULL;
+
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ */
+int deflateCopy (dest, source)
+ z_streamp dest;
+ z_streamp source;
+{
+ deflate_state *ds;
+ deflate_state *ss;
+ ushf *overlay;
+
+ if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ ss = (deflate_state *) source->state;
+
+ zmemcpy(dest, source, sizeof(*dest));
+
+ ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
+ if (ds == Z_NULL) return Z_MEM_ERROR;
+ dest->state = (struct internal_state FAR *) ds;
+ zmemcpy(ds, ss, sizeof(*ds));
+ ds->strm = dest;
+
+ ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
+ ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
+ ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
+ overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
+ ds->pending_buf = (uchf *) overlay;
+
+ if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
+ ds->pending_buf == Z_NULL) {
+ deflateEnd (dest);
+ return Z_MEM_ERROR;
+ }
+ /* ??? following zmemcpy doesn't work for 16-bit MSDOS */
+ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
+ zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
+ zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
+ zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+ ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+ ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+ ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+
+ ds->l_desc.dyn_tree = ds->dyn_ltree;
+ ds->d_desc.dyn_tree = ds->dyn_dtree;
+ ds->bl_desc.dyn_tree = ds->bl_tree;
+
+ return Z_OK;
+}
+
+/* ===========================================================================
+ * Return the number of bytes of output which are immediately available
+ * for output from the compressor.
+ */
+int deflateOutputPending (strm)
+ z_streamp strm;
+{
+ if (strm == Z_NULL || strm->state == Z_NULL) return 0;
+
+ return ((deflate_state *)(strm->state))->pending;
+}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+local int read_buf(strm, buf, size)
+ z_streamp strm;
+ charf *buf;
+ unsigned size;
+{
+ unsigned len = strm->avail_in;
+
+ if (len > size) len = size;
+ if (len == 0) return 0;
+
+ strm->avail_in -= len;
+
+ if (!((deflate_state *)(strm->state))->noheader) {
+ strm->adler = adler32(strm->adler, strm->next_in, len);
+ }
+ zmemcpy(buf, strm->next_in, len);
+ strm->next_in += len;
+ strm->total_in += len;
+
+ return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+local void lm_init (s)
+ deflate_state *s;
+{
+ s->window_size = (ulg)2L*s->w_size;
+
+ CLEAR_HASH(s);
+
+ /* Set the default configuration parameters:
+ */
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
+ s->good_match = configuration_table[s->level].good_length;
+ s->nice_match = configuration_table[s->level].nice_length;
+ s->max_chain_length = configuration_table[s->level].max_chain;
+
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->lookahead = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ s->ins_h = 0;
+#ifdef ASMV
+ match_init(); /* initialize the asm code */
+#endif
+}
+
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+#ifndef ASMV
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+local uInt longest_match(s, cur_match)
+ deflate_state *s;
+ IPos cur_match; /* current match */
+{
+ unsigned chain_length = s->max_chain_length;/* max hash chain length */
+ register Bytef *scan = s->window + s->strstart; /* current string */
+ register Bytef *match; /* matched string */
+ register int len; /* length of current match */
+ int best_len = s->prev_length; /* best match length so far */
+ int nice_match = s->nice_match; /* stop if match long enough */
+ IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+ s->strstart - (IPos)MAX_DIST(s) : NIL;
+ /* Stop when cur_match becomes <= limit. To simplify the code,
+ * we prevent matches with the string of window index 0.
+ */
+ Posf *prev = s->prev;
+ uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+ /* Compare two bytes at a time. Note: this is not always beneficial.
+ * Try with and without -DUNALIGNED_OK to check.
+ */
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
+ register ush scan_start = *(ushf*)scan;
+ register ush scan_end = *(ushf*)(scan+best_len-1);
+#else
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+ register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end = scan[best_len];
+#endif
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ /* Do not waste too much time if we already have a good match: */
+ if (s->prev_length >= s->good_match) {
+ chain_length >>= 2;
+ }
+ /* Do not look for matches beyond the end of the input. This is necessary
+ * to make deflate deterministic.
+ */
+ if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ do {
+ Assert(cur_match < s->strstart, "no future");
+ match = s->window + cur_match;
+
+ /* Skip to next match if the match length cannot increase
+ * or if the match length is less than 2:
+ */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+ /* This code assumes sizeof(unsigned short) == 2. Do not use
+ * UNALIGNED_OK if your compiler uses a different size.
+ */
+ if (*(ushf*)(match+best_len-1) != scan_end ||
+ *(ushf*)match != scan_start) continue;
+
+ /* It is not necessary to compare scan[2] and match[2] since they are
+ * always equal when the other bytes match, given that the hash keys
+ * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+ * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * lookahead only every 4th comparison; the 128th check will be made
+ * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * necessary to put more guard bytes at the end of the window, or
+ * to check more often for insufficient lookahead.
+ */
+ Assert(scan[2] == match[2], "scan[2]?");
+ scan++, match++;
+ do {
+ } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ scan < strend);
+ /* The funny "do {}" generates better code on most compilers */
+
+ /* Here, scan <= window+strstart+257 */
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ if (*scan == *match) scan++;
+
+ len = (MAX_MATCH - 1) - (int)(strend-scan);
+ scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+ if (match[best_len] != scan_end ||
+ match[best_len-1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match++;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+ if (len > best_len) {
+ s->match_start = cur_match;
+ best_len = len;
+ if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+ scan_end = *(ushf*)(scan+best_len-1);
+#else
+ scan_end1 = scan[best_len-1];
+ scan_end = scan[best_len];
+#endif
+ }
+ } while ((cur_match = prev[cur_match & wmask]) > limit
+ && --chain_length != 0);
+
+ if ((uInt)best_len <= s->lookahead) return best_len;
+ return s->lookahead;
+}
+#endif /* ASMV */
+
+#ifdef DEBUG_ZLIB
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+local void check_match(s, start, match, length)
+ deflate_state *s;
+ IPos start, match;
+ int length;
+{
+ /* check that the match is indeed a match */
+ if (zmemcmp((charf *)s->window + match,
+ (charf *)s->window + start, length) != EQUAL) {
+ fprintf(stderr, " start %u, match %u, length %d\n",
+ start, match, length);
+ do {
+ fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr,"\\[%d,%d]", start-match, length);
+ do { putc(s->window[start++], stderr); } while (--length != 0);
+ }
+}
+#else
+# define check_match(s, start, match, length)
+#endif
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+local void fill_window(s)
+ deflate_state *s;
+{
+ register unsigned n, m;
+ register Posf *p;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on 16 bit machine if strstart == 0
+ * and lookahead == 1 (input done one byte at a time)
+ */
+ more--;
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ } else if (s->strstart >= wsize+MAX_DIST(s)) {
+
+ zmemcpy((charf *)s->window, (charf *)s->window+wsize,
+ (unsigned)wsize);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+
+ /* Slide the hash table (could be avoided with 32 bit values
+ at the expense of memory usage). We slide even when level == 0
+ to keep the hash table consistent if we switch back to level > 0
+ later. (Using level 0 permanently is not an optimal usage of
+ zlib, so we don't care about this pathological case.)
+ */
+ n = s->hash_size;
+ p = &s->head[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ } while (--n);
+
+ n = wsize;
+ p = &s->prev[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) return;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead,
+ more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead >= MIN_MATCH) {
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, eof) { \
+ _tr_flush_block(s, (s->block_start >= 0L ? \
+ (charf *)&s->window[(unsigned)s->block_start] : \
+ (charf *)Z_NULL), \
+ (ulg)((long)s->strstart - s->block_start), \
+ (eof)); \
+ s->block_start = s->strstart; \
+ flush_pending(s->strm); \
+ Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, eof) { \
+ FLUSH_BLOCK_ONLY(s, eof); \
+ if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * uncompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+local block_state deflate_stored(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ * to pending_buf_size, and each stored block has a 5 byte header:
+ */
+ ulg max_block_size = 0xffff;
+ ulg max_start;
+
+ if (max_block_size > s->pending_buf_size - 5) {
+ max_block_size = s->pending_buf_size - 5;
+ }
+
+ /* Copy as much as possible from input to output: */
+ for (;;) {
+ /* Fill the window as much as possible: */
+ if (s->lookahead <= 1) {
+
+ Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+ s->block_start >= (long)s->w_size, "slide too late");
+
+ fill_window(s);
+ if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+ Assert(s->block_start >= 0L, "block gone");
+
+ s->strstart += s->lookahead;
+ s->lookahead = 0;
+
+ /* Emit a stored block if pending_buf will be full: */
+ max_start = s->block_start + max_block_size;
+ if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+ /* strstart == 0 is possible when wraparound on 16-bit machine */
+ s->lookahead = (uInt)(s->strstart - max_start);
+ s->strstart = (uInt)max_start;
+ FLUSH_BLOCK(s, 0);
+ }
+ /* Flush if we may have to slide, otherwise block_start may become
+ * negative and the data will be gone:
+ */
+ if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+ FLUSH_BLOCK(s, 0);
+ }
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+local block_state deflate_fast(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of the hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we have always match_length < MIN_MATCH
+ */
+ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+ }
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->match_start, s->match_length);
+
+ bflush = _tr_tally(s, s->strstart - s->match_start,
+ s->match_length - MIN_MATCH);
+
+ s->lookahead -= s->match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+ if (s->match_length <= s->max_insert_length &&
+ s->lookahead >= MIN_MATCH) {
+ s->match_length--; /* string at strstart already in hash table */
+ do {
+ s->strstart++;
+ INSERT_STRING(s, s->strstart, hash_head);
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s->match_length != 0);
+ s->strstart++;
+ } else {
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ bflush = _tr_tally (s, 0, s->window[s->strstart]);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+local block_state deflate_slow(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s->prev_length = s->match_length, s->prev_match = s->match_start;
+ s->match_length = MIN_MATCH-1;
+
+ if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+ s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+
+ if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
+ (s->match_length == MIN_MATCH &&
+ s->strstart - s->match_start > TOO_FAR))) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s->match_length = MIN_MATCH-1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+ uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+ bflush = _tr_tally(s, s->strstart -1 - s->prev_match,
+ s->prev_length - MIN_MATCH);
+
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s->lookahead -= s->prev_length-1;
+ s->prev_length -= 2;
+ do {
+ if (++s->strstart <= max_insert) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+ } while (--s->prev_length != 0);
+ s->match_available = 0;
+ s->match_length = MIN_MATCH-1;
+ s->strstart++;
+
+ if (bflush) FLUSH_BLOCK(s, 0);
+
+ } else if (s->match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ if (_tr_tally (s, 0, s->window[s->strstart-1])) {
+ FLUSH_BLOCK_ONLY(s, 0);
+ }
+ s->strstart++;
+ s->lookahead--;
+ if (s->strm->avail_out == 0) return need_more;
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s->match_available = 1;
+ s->strstart++;
+ s->lookahead--;
+ }
+ }
+ Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s->match_available) {
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally (s, 0, s->window[s->strstart-1]);
+ s->match_available = 0;
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+/* --- deflate.c */
+
+/* +++ trees.c */
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-1996 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process uses several Huffman trees. The more
+ * common source values are represented by shorter bit sequences.
+ *
+ * Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values). The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
+ * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ * Storer, James A.
+ * Data Compression: Methods and Theory, pp. 49-50.
+ * Computer Science Press, 1988. ISBN 0-7167-8156-5.
+ *
+ * Sedgewick, R.
+ * Algorithms, p290.
+ * Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
+
+/* #include "deflate.h" */
+
+#ifdef DEBUG_ZLIB
+# include <ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6 16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10 17
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+#define REPZ_11_138 18
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
+
+local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+local int extra_dbits[D_CODES] /* extra bits for each distance code */
+ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+local int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+local uch bl_order[BL_CODES]
+ = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+local ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However
+ * The codes 286 and 287 are needed to build a canonical tree (see _tr_init
+ * below).
+ */
+
+local ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+local uch dist_code[512];
+/* distance codes. The first 256 values correspond to the distances
+ * 1 .. 256, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+local uch length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+local int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+local int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+struct static_tree_desc_s {
+ ct_data *static_tree; /* static tree or NULL */
+ intf *extra_bits; /* extra bits for each code or NULL */
+ int extra_base; /* base index for extra_bits */
+ int elems; /* max number of elements in the tree */
+ int max_length; /* max bit length for the codes */
+};
+
+local static_tree_desc static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+local static_tree_desc static_d_desc =
+{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
+
+local static_tree_desc static_bl_desc =
+{(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+local void tr_static_init OF((void));
+local void init_block OF((deflate_state *s));
+local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
+local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
+local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
+local void build_tree OF((deflate_state *s, tree_desc *desc));
+local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local int build_bl_tree OF((deflate_state *s));
+local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
+ int blcodes));
+local void compress_block OF((deflate_state *s, ct_data *ltree,
+ ct_data *dtree));
+local void set_data_type OF((deflate_state *s));
+local unsigned bi_reverse OF((unsigned value, int length));
+local void bi_windup OF((deflate_state *s));
+local void bi_flush OF((deflate_state *s));
+local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
+ int header));
+
+#ifndef DEBUG_ZLIB
+# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
+ /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* DEBUG_ZLIB */
+# define send_code(s, c, tree) \
+ { if (verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+ send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+#define d_code(dist) \
+ ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. dist_code[256] and dist_code[257] are never
+ * used.
+ */
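+
+/* For example (illustrative values): a match at distance 300 has
+ * dist = 299; since 299 >= 256, d_code(299) is dist_code[256+(299>>7)]
+ * = dist_code[258] = 16, i.e. distance code 16 (distances 257..384).
+ */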
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+ put_byte(s, (uch)((w) & 0xff)); \
+ put_byte(s, (uch)((ush)(w) >> 8)); \
+}
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG_ZLIB
+local void send_bits OF((deflate_state *s, int value, int length));
+
+local void send_bits(s, value, length)
+ deflate_state *s;
+ int value; /* value to send */
+ int length; /* number of bits */
+{
+ Tracevv((stderr," l %2d v %4x ", length, value));
+ Assert(length > 0 && length <= 15, "invalid length");
+ s->bits_sent += (ulg)length;
+
+ /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+ * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+ * unused bits in value.
+ */
+ if (s->bi_valid > (int)Buf_size - length) {
+ s->bi_buf |= (value << s->bi_valid);
+ put_short(s, s->bi_buf);
+ s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+ s->bi_valid += length - Buf_size;
+ } else {
+ s->bi_buf |= value << s->bi_valid;
+ s->bi_valid += length;
+ }
+}
+#else /* !DEBUG_ZLIB */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+ if (s->bi_valid > (int)Buf_size - len) {\
+ int val = value;\
+ s->bi_buf |= (val << s->bi_valid);\
+ put_short(s, s->bi_buf);\
+ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+ s->bi_valid += len - Buf_size;\
+ } else {\
+ s->bi_buf |= (value) << s->bi_valid;\
+ s->bi_valid += len;\
+ }\
+}
+#endif /* DEBUG_ZLIB */
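+
+/* Example of the buffering above, assuming Buf_size == 16: with
+ * bi_valid == 13 and a 5-bit value, the low 3 bits of the value complete
+ * bi_buf, put_short() flushes the 16-bit buffer LSB first, and the top
+ * 2 bits of the value start the new buffer (bi_valid becomes 2).
+ */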
+
+
+#define MAX(a,b) (a >= b ? a : b)
+/* the arguments must not have side effects */
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables. In a multi-threaded environment,
+ * this function may be called by two threads concurrently, but this is
+ * harmless since both invocations do exactly the same thing.
+ */
+local void tr_static_init()
+{
+ static int static_init_done = 0;
+ int n; /* iterates over tree elements */
+ int bits; /* bit counter */
+ int length; /* length value */
+ int code; /* code value */
+ int dist; /* distance index */
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ if (static_init_done) return;
+
+ /* Initialize the mapping length (0..255) -> length code (0..28) */
+ length = 0;
+ for (code = 0; code < LENGTH_CODES-1; code++) {
+ base_length[code] = length;
+ for (n = 0; n < (1<<extra_lbits[code]); n++) {
+ length_code[length++] = (uch)code;
+ }
+ }
+ Assert (length == 256, "tr_static_init: length != 256");
+ /* Note that the length 255 (match length 258) can be represented
+ * in two different ways: code 284 + 5 bits or code 285, so we
+ * overwrite length_code[255] to use the best encoding:
+ */
+ length_code[length-1] = (uch)code;
+
+ /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+ dist = 0;
+ for (code = 0 ; code < 16; code++) {
+ base_dist[code] = dist;
+ for (n = 0; n < (1<<extra_dbits[code]); n++) {
+ dist_code[dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: dist != 256");
+ dist >>= 7; /* from now on, all distances are divided by 128 */
+ for ( ; code < D_CODES; code++) {
+ base_dist[code] = dist << 7;
+ for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+ dist_code[256 + dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+ /* Construct the codes of the static literal tree */
+ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+ n = 0;
+ while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+ while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+ while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+ while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+ /* Codes 286 and 287 do not exist, but we must include them in the
+ * tree construction to get a canonical Huffman tree (longest code
+ * all ones)
+ */
+ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+ /* The static distance tree is trivial: */
+ for (n = 0; n < D_CODES; n++) {
+ static_dtree[n].Len = 5;
+ static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+ }
+ static_init_done = 1;
+}
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void _tr_init(s)
+ deflate_state *s;
+{
+ tr_static_init();
+
+ s->compressed_len = 0L;
+
+ s->l_desc.dyn_tree = s->dyn_ltree;
+ s->l_desc.stat_desc = &static_l_desc;
+
+ s->d_desc.dyn_tree = s->dyn_dtree;
+ s->d_desc.stat_desc = &static_d_desc;
+
+ s->bl_desc.dyn_tree = s->bl_tree;
+ s->bl_desc.stat_desc = &static_bl_desc;
+
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+#ifdef DEBUG_ZLIB
+ s->bits_sent = 0L;
+#endif
+
+ /* Initialize the first block of the first file: */
+ init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+local void init_block(s)
+ deflate_state *s;
+{
+ int n; /* iterates over tree elements */
+
+ /* Initialize the trees. */
+ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
+ for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
+ for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
+
+ s->dyn_ltree[END_BLOCK].Freq = 1;
+ s->opt_len = s->static_len = 0L;
+ s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+ top = s->heap[SMALLEST]; \
+ s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+ pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compares two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+ (tree[n].Freq < tree[m].Freq || \
+ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+local void pqdownheap(s, tree, k)
+ deflate_state *s;
+ ct_data *tree; /* the tree to restore */
+ int k; /* node to move down */
+{
+ int v = s->heap[k];
+ int j = k << 1; /* left son of k */
+ while (j <= s->heap_len) {
+ /* Set j to the smallest of the two sons: */
+ if (j < s->heap_len &&
+ smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+ j++;
+ }
+ /* Exit if v is smaller than both sons */
+ if (smaller(tree, v, s->heap[j], s->depth)) break;
+
+ /* Exchange v with the smallest son */
+ s->heap[k] = s->heap[j]; k = j;
+
+ /* And continue down the tree, setting j to the left son of k */
+ j <<= 1;
+ }
+ s->heap[k] = v;
+}
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+local void gen_bitlen(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ int max_code = desc->max_code;
+ ct_data *stree = desc->stat_desc->static_tree;
+ intf *extra = desc->stat_desc->extra_bits;
+ int base = desc->stat_desc->extra_base;
+ int max_length = desc->stat_desc->max_length;
+ int h; /* heap index */
+ int n, m; /* iterate over the tree elements */
+ int bits; /* bit length */
+ int xbits; /* extra bits */
+ ush f; /* frequency */
+ int overflow = 0; /* number of elements with bit length too large */
+
+ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+ /* In a first pass, compute the optimal bit lengths (which may
+ * overflow in the case of the bit length tree).
+ */
+ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+ for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+ n = s->heap[h];
+ bits = tree[tree[n].Dad].Len + 1;
+ if (bits > max_length) bits = max_length, overflow++;
+ tree[n].Len = (ush)bits;
+ /* We overwrite tree[n].Dad which is no longer needed */
+
+ if (n > max_code) continue; /* not a leaf node */
+
+ s->bl_count[bits]++;
+ xbits = 0;
+ if (n >= base) xbits = extra[n-base];
+ f = tree[n].Freq;
+ s->opt_len += (ulg)f * (bits + xbits);
+ if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
+ }
+ if (overflow == 0) return;
+
+ Trace((stderr,"\nbit length overflow\n"));
+ /* This happens for example on obj2 and pic of the Calgary corpus */
+
+ /* Find the first bit length which could increase: */
+ do {
+ bits = max_length-1;
+ while (s->bl_count[bits] == 0) bits--;
+ s->bl_count[bits]--; /* move one leaf down the tree */
+ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+ s->bl_count[max_length]--;
+ /* The brother of the overflow item also moves one step up,
+ * but this does not affect bl_count[max_length]
+ */
+ overflow -= 2;
+ } while (overflow > 0);
+
+ /* Now recompute all bit lengths, scanning in increasing frequency.
+ * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+ * lengths instead of fixing only the wrong ones. This idea is taken
+ * from 'ar' written by Haruhiko Okumura.)
+ */
+ for (bits = max_length; bits != 0; bits--) {
+ n = s->bl_count[bits];
+ while (n != 0) {
+ m = s->heap[--h];
+ if (m > max_code) continue;
+ if (tree[m].Len != (unsigned) bits) {
+ Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+ s->opt_len += ((long)bits - (long)tree[m].Len)
+ *(long)tree[m].Freq;
+ tree[m].Len = (ush)bits;
+ }
+ n--;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+local void gen_codes (tree, max_code, bl_count)
+ ct_data *tree; /* the tree to decorate */
+ int max_code; /* largest code with non zero frequency */
+ ushf *bl_count; /* number of codes at each bit length */
+{
+ ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+ ush code = 0; /* running code value */
+ int bits; /* bit index */
+ int n; /* code index */
+
+ /* The distribution counts are first used to generate the code values
+ * without bit reversal.
+ */
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ next_code[bits] = code = (code + bl_count[bits-1]) << 1;
+ }
+ /* Check that the bit counts in bl_count are consistent. The last code
+ * must be all ones.
+ */
+ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ "inconsistent bit counts");
+ Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+ for (n = 0; n <= max_code; n++) {
+ int len = tree[n].Len;
+ if (len == 0) continue;
+ /* Now reverse the bits */
+ tree[n].Code = bi_reverse(next_code[len]++, len);
+
+ Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+ n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+ }
+}
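+
+/* Worked example (the static literal tree built in tr_static_init):
+ * with bl_count[7] = 24, bl_count[8] = 152 and bl_count[9] = 112 the loop
+ * above yields next_code[7] = 0, next_code[8] = (0+24)<<1 = 48 and
+ * next_code[9] = (48+152)<<1 = 400, so literal 0 gets the canonical
+ * 8-bit code 48 (00110000), stored bit-reversed for LSB-first output.
+ */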
+
+/* ===========================================================================
+ * Construct one Huffman tree and assign the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ * and corresponding code. The length opt_len is updated; static_len is
+ * also updated if stree is not null. The field max_code is set.
+ */
+local void build_tree(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ ct_data *stree = desc->stat_desc->static_tree;
+ int elems = desc->stat_desc->elems;
+ int n, m; /* iterate over heap elements */
+ int max_code = -1; /* largest code with non zero frequency */
+ int node; /* new node being created */
+
+ /* Construct the initial heap, with least frequent element in
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[0] is not used.
+ */
+ s->heap_len = 0, s->heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n].Freq != 0) {
+ s->heap[++(s->heap_len)] = max_code = n;
+ s->depth[n] = 0;
+ } else {
+ tree[n].Len = 0;
+ }
+ }
+
+ /* The pkzip format requires that at least one distance code exists,
+ * and that at least one bit should be sent even if there is only one
+ * possible code. So to avoid special checks later on we force at least
+ * two codes of non zero frequency.
+ */
+ while (s->heap_len < 2) {
+ node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
+ tree[node].Freq = 1;
+ s->depth[node] = 0;
+ s->opt_len--; if (stree) s->static_len -= stree[node].Len;
+ /* node is 0 or 1 so it does not have extra bits */
+ }
+ desc->max_code = max_code;
+
+ /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ * establish sub-heaps of increasing lengths:
+ */
+ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
+
+ /* Construct the Huffman tree by repeatedly combining the least two
+ * frequent nodes.
+ */
+ node = elems; /* next internal node of the tree */
+ do {
+ pqremove(s, tree, n); /* n = node of least frequency */
+ m = s->heap[SMALLEST]; /* m = node of next least frequency */
+
+ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
+ s->heap[--(s->heap_max)] = m;
+
+ /* Create a new node father of n and m */
+ tree[node].Freq = tree[n].Freq + tree[m].Freq;
+ s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1);
+ tree[n].Dad = tree[m].Dad = (ush)node;
+#ifdef DUMP_BL_TREE
+ if (tree == s->bl_tree) {
+ fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
+ node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
+ }
+#endif
+ /* and insert the new node in the heap */
+ s->heap[SMALLEST] = node++;
+ pqdownheap(s, tree, SMALLEST);
+
+ } while (s->heap_len >= 2);
+
+ s->heap[--(s->heap_max)] = s->heap[SMALLEST];
+
+ /* At this point, the fields freq and dad are set. We can now
+ * generate the bit lengths.
+ */
+ gen_bitlen(s, (tree_desc *)desc);
+
+ /* The field len is now set, we can generate the bit codes */
+ gen_codes ((ct_data *)tree, max_code, s->bl_count);
+}
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+local void scan_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ if (nextlen == 0) max_count = 138, min_count = 3;
+ tree[max_code+1].Len = (ush)0xffff; /* guard */
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ s->bl_tree[curlen].Freq += count;
+ } else if (curlen != 0) {
+ if (curlen != prevlen) s->bl_tree[curlen].Freq++;
+ s->bl_tree[REP_3_6].Freq++;
+ } else if (count <= 10) {
+ s->bl_tree[REPZ_3_10].Freq++;
+ } else {
+ s->bl_tree[REPZ_11_138].Freq++;
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+local void send_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ /* tree[max_code+1].Len = -1; */ /* guard already set */
+ if (nextlen == 0) max_count = 138, min_count = 3;
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
+
+ } else if (curlen != 0) {
+ if (curlen != prevlen) {
+ send_code(s, curlen, s->bl_tree); count--;
+ }
+ Assert(count >= 3 && count <= 6, " 3_6?");
+ send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+
+ } else if (count <= 10) {
+ send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+
+ } else {
+ send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
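+
+/* For example, a run of 20 consecutive zero code lengths is sent as one
+ * REPZ_11_138 code followed by the 7-bit repeat count 20-11 = 9, while a
+ * run of 4 zeros is sent as REPZ_3_10 with the 3-bit count 4-3 = 1.
+ */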
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+local int build_bl_tree(s)
+ deflate_state *s;
+{
+ int max_blindex; /* index of last bit length code of non zero freq */
+
+ /* Determine the bit length frequencies for literal and distance trees */
+ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
+ scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
+
+ /* Build the bit length tree: */
+ build_tree(s, (tree_desc *)(&(s->bl_desc)));
+ /* opt_len now includes the length of the tree representations, except
+ * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+ */
+
+ /* Determine the number of bit length codes to send. The pkzip format
+ * requires that at least 4 bit length codes be sent. (appnote.txt says
+ * 3 but the actual value used is 4.)
+ */
+ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
+ if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
+ }
+ /* Update opt_len to include the bit length tree and counts */
+ s->opt_len += 3*(max_blindex+1) + 5+5+4;
+ Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+ s->opt_len, s->static_len));
+
+ return max_blindex;
+}
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+local void send_all_trees(s, lcodes, dcodes, blcodes)
+ deflate_state *s;
+ int lcodes, dcodes, blcodes; /* number of codes for each tree */
+{
+ int rank; /* index in bl_order */
+
+ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+ Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+ "too many codes");
+ Tracev((stderr, "\nbl counts: "));
+ send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
+ send_bits(s, dcodes-1, 5);
+ send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
+ for (rank = 0; rank < blcodes; rank++) {
+ Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+ send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
+ }
+ Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+}
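+
+/* As an illustration, with lcodes = 280, dcodes = 20 and blcodes = 18 the
+ * header fields sent above are 23 (5 bits), 19 (5 bits) and 14 (4 bits),
+ * followed by eighteen 3-bit bit-length-code lengths in bl_order.
+ */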
+
+/* ===========================================================================
+ * Send a stored block
+ */
+void _tr_stored_block(s, buf, stored_len, eof)
+ deflate_state *s;
+ charf *buf; /* input block */
+ ulg stored_len; /* length of input block */
+ int eof; /* true if this is the last block for a file */
+{
+ send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
+ s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
+ s->compressed_len += (stored_len + 4) << 3;
+
+ copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
+}
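+
+/* The 3-bit header sent above packs the "last block" flag in the low bit
+ * and the block type in the next two bits; e.g. a final stored block
+ * sends (STORED_BLOCK<<1)+1 == 1, which appears on the wire LSB first
+ * as the bits 1, 0, 0.
+ */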
+
+/* Send just the `stored block' type code without any length bytes or data.
+ */
+void _tr_stored_type_only(s)
+ deflate_state *s;
+{
+ send_bits(s, (STORED_BLOCK << 1), 3);
+ bi_windup(s);
+ s->compressed_len = (s->compressed_len + 3) & ~7L;
+}
+
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ * The current inflate code requires 9 bits of lookahead. If the
+ * last two codes for the previous block (real code plus EOB) were coded
+ * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ * the last real code. In this case we send two empty static blocks instead
+ * of one. (There are no problems if the previous block is stored or fixed.)
+ * To simplify the code, we assume the worst case of last real code encoded
+ * on one bit only.
+ */
+void _tr_align(s)
+ deflate_state *s;
+{
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
+ bi_flush(s);
+ /* Of the 10 bits for the empty block, we have already sent
+ * (10 - bi_valid) bits. The lookahead for the last real code (before
+ * the EOB of the previous block) was thus at least one plus the length
+ * of the EOB plus what we have just sent of the empty static block.
+ */
+ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L;
+ bi_flush(s);
+ }
+ s->last_eob_len = 7;
+}
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and output the encoded block to the zip file. This function
+ * returns the total compressed length for the file so far.
+ */
+ulg _tr_flush_block(s, buf, stored_len, eof)
+ deflate_state *s;
+ charf *buf; /* input block, or NULL if too old */
+ ulg stored_len; /* length of input block */
+ int eof; /* true if this is the last block for a file */
+{
+ ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+ int max_blindex = 0; /* index of last bit length code of non zero freq */
+
+ /* Build the Huffman trees unless a stored block is forced */
+ if (s->level > 0) {
+
+ /* Check if the file is ascii or binary */
+ if (s->data_type == Z_UNKNOWN) set_data_type(s);
+
+ /* Construct the literal and distance trees */
+ build_tree(s, (tree_desc *)(&(s->l_desc)));
+ Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+
+ build_tree(s, (tree_desc *)(&(s->d_desc)));
+ Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+ /* At this point, opt_len and static_len are the total bit lengths of
+ * the compressed block data, excluding the tree representations.
+ */
+
+ /* Build the bit length tree for the above two trees, and get the index
+ * in bl_order of the last bit length code to send.
+ */
+ max_blindex = build_bl_tree(s);
+
+ /* Determine the best encoding. Compute first the block length in bytes*/
+ opt_lenb = (s->opt_len+3+7)>>3;
+ static_lenb = (s->static_len+3+7)>>3;
+
+ Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+ s->last_lit));
+
+ if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+ } else {
+ Assert(buf != (char*)0, "lost buf");
+ opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+ }
+
+ /* If compression failed and this is the first and last block,
+ * and if the .zip file can be seeked (to rewrite the local header),
+ * the whole file is transformed into a stored file:
+ */
+#ifdef STORED_FILE_OK
+# ifdef FORCE_STORED_FILE
+ if (eof && s->compressed_len == 0L) { /* force stored file */
+# else
+ if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
+# endif
+ /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
+ if (buf == (charf*)0) error ("block vanished");
+
+ copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
+ s->compressed_len = stored_len << 3;
+ s->method = STORED;
+ } else
+#endif /* STORED_FILE_OK */
+
+#ifdef FORCE_STORED
+ if (buf != (char*)0) { /* force stored block */
+#else
+ if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+ /* 4: two words for the lengths */
+#endif
+ /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ * Otherwise we can't have processed more than WSIZE input bytes since
+ * the last block flush, because compression would have been
+ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ * transform a block into a stored block.
+ */
+ _tr_stored_block(s, buf, stored_len, eof);
+
+#ifdef FORCE_STATIC
+ } else if (static_lenb >= 0) { /* force static trees */
+#else
+ } else if (static_lenb == opt_lenb) {
+#endif
+ send_bits(s, (STATIC_TREES<<1)+eof, 3);
+ compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
+ s->compressed_len += 3 + s->static_len;
+ } else {
+ send_bits(s, (DYN_TREES<<1)+eof, 3);
+ send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
+ max_blindex+1);
+ compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
+ s->compressed_len += 3 + s->opt_len;
+ }
+ Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+ init_block(s);
+
+ if (eof) {
+ bi_windup(s);
+ s->compressed_len += 7; /* align on byte boundary */
+ }
+ Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+ s->compressed_len-7*eof));
+
+ return s->compressed_len >> 3;
+}
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+int _tr_tally (s, dist, lc)
+ deflate_state *s;
+ unsigned dist; /* distance of matched string */
+ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+{
+ s->d_buf[s->last_lit] = (ush)dist;
+ s->l_buf[s->last_lit++] = (uch)lc;
+ if (dist == 0) {
+ /* lc is the unmatched char */
+ s->dyn_ltree[lc].Freq++;
+ } else {
+ s->matches++;
+ /* Here, lc is the match length - MIN_MATCH */
+ dist--; /* dist = match distance - 1 */
+ Assert((ush)dist < (ush)MAX_DIST(s) &&
+ (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+ (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
+
+ s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_dtree[d_code(dist)].Freq++;
+ }
+
+ /* Try to guess if it is profitable to stop the current block here */
+ if (s->level > 2 && (s->last_lit & 0xfff) == 0) {
+ /* Compute an upper bound for the compressed length */
+ ulg out_length = (ulg)s->last_lit*8L;
+ ulg in_length = (ulg)((long)s->strstart - s->block_start);
+ int dcode;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += (ulg)s->dyn_dtree[dcode].Freq *
+ (5L+extra_dbits[dcode]);
+ }
+ out_length >>= 3;
+ Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+ s->last_lit, in_length, out_length,
+ 100L - out_length*100L/in_length));
+ if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+ }
+ return (s->last_lit == s->lit_bufsize-1);
+ /* We avoid equality with lit_bufsize because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
+}
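+
+/* Example of a tally (illustrative values): a match of length 10 at
+ * distance 300 is recorded with dist = 300 and lc = 10 - MIN_MATCH = 7;
+ * the frequency updates above touch dyn_ltree[length_code[7]+LITERALS+1]
+ * (length code 264) and dyn_dtree[d_code(299)] (distance code 16).
+ */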
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+local void compress_block(s, ltree, dtree)
+ deflate_state *s;
+ ct_data *ltree; /* literal tree */
+ ct_data *dtree; /* distance tree */
+{
+ unsigned dist; /* distance of matched string */
+ int lc; /* match length or unmatched char (if dist == 0) */
+ unsigned lx = 0; /* running index in l_buf */
+ unsigned code; /* the code to send */
+ int extra; /* number of extra bits to send */
+
+ if (s->last_lit != 0) do {
+ dist = s->d_buf[lx];
+ lc = s->l_buf[lx++];
+ if (dist == 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+ } else {
+ /* Here, lc is the match length - MIN_MATCH */
+ code = length_code[lc];
+ send_code(s, code+LITERALS+1, ltree); /* send the length code */
+ extra = extra_lbits[code];
+ if (extra != 0) {
+ lc -= base_length[code];
+ send_bits(s, lc, extra); /* send the extra length bits */
+ }
+ dist--; /* dist is now the match distance - 1 */
+ code = d_code(dist);
+ Assert (code < D_CODES, "bad d_code");
+
+ send_code(s, code, dtree); /* send the distance code */
+ extra = extra_dbits[code];
+ if (extra != 0) {
+ dist -= base_dist[code];
+ send_bits(s, dist, extra); /* send the extra distance bits */
+ }
+ } /* literal or match pair ? */
+
+ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+ Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
+
+ } while (lx < s->last_lit);
+
+ send_code(s, END_BLOCK, ltree);
+ s->last_eob_len = ltree[END_BLOCK].Len;
+}
+
+/* ===========================================================================
+ * Set the data type to ASCII or BINARY, using a crude approximation:
+ * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
+ * IN assertion: the fields freq of dyn_ltree are set and the total of all
+ * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ */
+local void set_data_type(s)
+ deflate_state *s;
+{
+ int n = 0;
+ unsigned ascii_freq = 0;
+ unsigned bin_freq = 0;
+ while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
+ while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
+ while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
+ s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
+}
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+local unsigned bi_reverse(code, len)
+ unsigned code; /* the value to invert */
+ int len; /* its bit length */
+{
+ register unsigned res = 0;
+ do {
+ res |= code & 1;
+ code >>= 1, res <<= 1;
+ } while (--len > 0);
+ return res >> 1;
+}
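+
+/* E.g. bi_reverse(0x06, 5) turns the bits 00110 into 01100 (0x0c). */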
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+local void bi_flush(s)
+ deflate_state *s;
+{
+ if (s->bi_valid == 16) {
+ put_short(s, s->bi_buf);
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ } else if (s->bi_valid >= 8) {
+ put_byte(s, (Byte)s->bi_buf);
+ s->bi_buf >>= 8;
+ s->bi_valid -= 8;
+ }
+}
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+local void bi_windup(s)
+ deflate_state *s;
+{
+ if (s->bi_valid > 8) {
+ put_short(s, s->bi_buf);
+ } else if (s->bi_valid > 0) {
+ put_byte(s, (Byte)s->bi_buf);
+ }
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+#ifdef DEBUG_ZLIB
+ s->bits_sent = (s->bits_sent+7) & ~7;
+#endif
+}
+
+/* ===========================================================================
+ * Copy a stored block, storing first the length and its
+ * one's complement if requested.
+ */
+local void copy_block(s, buf, len, header)
+ deflate_state *s;
+ charf *buf; /* the input data */
+ unsigned len; /* its length */
+ int header; /* true if block header must be written */
+{
+ bi_windup(s); /* align on byte boundary */
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+
+ if (header) {
+ put_short(s, (ush)len);
+ put_short(s, (ush)~len);
+#ifdef DEBUG_ZLIB
+ s->bits_sent += 2*16;
+#endif
+ }
+#ifdef DEBUG_ZLIB
+ s->bits_sent += (ulg)len<<3;
+#endif
+ /* bundle up the put_byte(s, *buf++) calls */
+ zmemcpy(&s->pending_buf[s->pending], buf, len);
+ s->pending += len;
+}
+/* --- trees.c */
+
+/* +++ inflate.c */
+/* inflate.c -- zlib interface to inflate modules
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include "zutil.h" */
+
+/* +++ infblock.h */
+/* infblock.h -- header to use infblock.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_blocks_state;
+typedef struct inflate_blocks_state FAR inflate_blocks_statef;
+
+extern inflate_blocks_statef * inflate_blocks_new OF((
+ z_streamp z,
+ check_func c, /* check function */
+ uInt w)); /* window size */
+
+extern int inflate_blocks OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int)); /* initial return code */
+
+extern void inflate_blocks_reset OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ uLongf *)); /* check value on output */
+
+extern int inflate_blocks_free OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ uLongf *)); /* check value on output */
+
+extern void inflate_set_dictionary OF((
+ inflate_blocks_statef *s,
+ const Bytef *d, /* dictionary */
+ uInt n)); /* dictionary length */
+
+extern int inflate_addhistory OF((
+ inflate_blocks_statef *,
+ z_streamp));
+
+extern int inflate_packet_flush OF((
+ inflate_blocks_statef *));
+/* --- infblock.h */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_blocks_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* inflate private state */
+struct internal_state {
+
+ /* mode */
+ enum {
+ METHOD, /* waiting for method byte */
+ FLAG, /* waiting for flag byte */
+ DICT4, /* four dictionary check bytes to go */
+ DICT3, /* three dictionary check bytes to go */
+ DICT2, /* two dictionary check bytes to go */
+ DICT1, /* one dictionary check byte to go */
+ DICT0, /* waiting for inflateSetDictionary */
+ BLOCKS, /* decompressing blocks */
+ CHECK4, /* four check bytes to go */
+ CHECK3, /* three check bytes to go */
+ CHECK2, /* two check bytes to go */
+ CHECK1, /* one check byte to go */
+ DONE, /* finished check, done */
+ BAD} /* got an error--stay here */
+ mode; /* current inflate mode */
+
+ /* mode dependent information */
+ union {
+ uInt method; /* if FLAGS, method byte */
+ struct {
+ uLong was; /* computed check value */
+ uLong need; /* stream check value */
+ } check; /* if CHECK, check values to compare */
+ uInt marker; /* if BAD, inflateSync's marker bytes count */
+ } sub; /* submode */
+
+ /* mode independent information */
+ int nowrap; /* flag for no wrapper */
+ uInt wbits; /* log2(window size) (8..15, defaults to 15) */
+ inflate_blocks_statef
+ *blocks; /* current inflate_blocks state */
+
+};
+
+
+int inflateReset(z)
+z_streamp z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->total_in = z->total_out = 0;
+ z->msg = Z_NULL;
+ z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
+ inflate_blocks_reset(z->state->blocks, z, &c);
+ Trace((stderr, "inflate: reset\n"));
+ return Z_OK;
+}
+
+
+int inflateEnd(z)
+z_streamp z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->blocks != Z_NULL)
+ inflate_blocks_free(z->state->blocks, z, &c);
+ ZFREE(z, z->state);
+ z->state = Z_NULL;
+ Trace((stderr, "inflate: end\n"));
+ return Z_OK;
+}
+
+
+int inflateInit2_(z, w, version, stream_size)
+z_streamp z;
+int w;
+const char *version;
+int stream_size;
+{
+ if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
+ stream_size != sizeof(z_stream))
+ return Z_VERSION_ERROR;
+
+ /* initialize state */
+ if (z == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->msg = Z_NULL;
+#ifndef NO_ZCFUNCS
+ if (z->zalloc == Z_NULL)
+ {
+ z->zalloc = zcalloc;
+ z->opaque = (voidpf)0;
+ }
+ if (z->zfree == Z_NULL) z->zfree = zcfree;
+#endif
+ if ((z->state = (struct internal_state FAR *)
+ ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
+ return Z_MEM_ERROR;
+ z->state->blocks = Z_NULL;
+
+ /* handle undocumented nowrap option (no zlib header or check) */
+ z->state->nowrap = 0;
+ if (w < 0)
+ {
+ w = - w;
+ z->state->nowrap = 1;
+ }
+
+ /* set window size */
+ if (w < 8 || w > 15)
+ {
+ inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ z->state->wbits = (uInt)w;
+
+ /* create inflate_blocks state */
+ if ((z->state->blocks =
+ inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, (uInt)1 << w))
+ == Z_NULL)
+ {
+ inflateEnd(z);
+ return Z_MEM_ERROR;
+ }
+ Trace((stderr, "inflate: allocated\n"));
+
+ /* reset state */
+ inflateReset(z);
+ return Z_OK;
+}
+
+
+int inflateInit_(z, version, stream_size)
+z_streamp z;
+const char *version;
+int stream_size;
+{
+ return inflateInit2_(z, DEF_WBITS, version, stream_size);
+}
+
+
+#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
+#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
+
+int inflate(z, f)
+z_streamp z;
+int f;
+{
+ int r;
+ uInt b;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0)
+ return Z_STREAM_ERROR;
+ r = Z_BUF_ERROR;
+ while (1) switch (z->state->mode)
+ {
+ case METHOD:
+ NEEDBYTE
+ if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"unknown compression method";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"invalid window size";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ z->state->mode = FLAG;
+ case FLAG:
+ NEEDBYTE
+ b = NEXTBYTE;
+ if (((z->state->sub.method << 8) + b) % 31)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"incorrect header check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib header ok\n"));
+ if (!(b & PRESET_DICT))
+ {
+ z->state->mode = BLOCKS;
+ break;
+ }
+ z->state->mode = DICT4;
+ case DICT4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = DICT3;
+ case DICT3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = DICT2;
+ case DICT2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = DICT1;
+ case DICT1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+ z->adler = z->state->sub.check.need;
+ z->state->mode = DICT0;
+ return Z_NEED_DICT;
+ case DICT0:
+ z->state->mode = BAD;
+ z->msg = (char*)"need dictionary";
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_STREAM_ERROR;
+ case BLOCKS:
+ r = inflate_blocks(z->state->blocks, z, r);
+ if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
+ r = inflate_packet_flush(z->state->blocks);
+ if (r == Z_DATA_ERROR)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ break;
+ }
+ if (r != Z_STREAM_END)
+ return r;
+ r = Z_OK;
+ inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
+ if (z->state->nowrap)
+ {
+ z->state->mode = DONE;
+ break;
+ }
+ z->state->mode = CHECK4;
+ case CHECK4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = CHECK3;
+ case CHECK3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = CHECK2;
+ case CHECK2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = CHECK1;
+ case CHECK1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+
+ if (z->state->sub.check.was != z->state->sub.check.need)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"incorrect data check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib check ok\n"));
+ z->state->mode = DONE;
+ case DONE:
+ return Z_STREAM_END;
+ case BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ empty:
+ if (f != Z_PACKET_FLUSH)
+ return r;
+ z->state->mode = BAD;
+ z->msg = (char *)"need more for packet flush";
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_DATA_ERROR;
+}
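+
+/* A typical zlib header, e.g. the bytes 0x78 0x9c, passes the checks
+ * above (with the default 15-bit window): the method byte 0x78 gives
+ * CM = 8 (Z_DEFLATED) and CINFO = 7; (0x78<<8 | 0x9c) is divisible by
+ * 31, so the FLAG check succeeds, and PRESET_DICT is clear, so decoding
+ * proceeds directly to BLOCKS.
+ */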
+
+
+int inflateSetDictionary(z, dictionary, dictLength)
+z_streamp z;
+const Bytef *dictionary;
+uInt dictLength;
+{
+ uInt length = dictLength;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0)
+ return Z_STREAM_ERROR;
+
+ if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR;
+ z->adler = 1L;
+
+ if (length >= ((uInt)1<<z->state->wbits))
+ {
+ length = (1<<z->state->wbits)-1;
+ dictionary += dictLength - length;
+ }
+ inflate_set_dictionary(z->state->blocks, dictionary, length);
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+
+int inflateIncomp(z)
+z_stream *z;
+{
+ if (z->state->mode != BLOCKS)
+ return Z_DATA_ERROR;
+ return inflate_addhistory(z->state->blocks, z);
+}
+
+
+int inflateSync(z)
+z_streamp z;
+{
+ uInt n; /* number of bytes to look at */
+ Bytef *p; /* pointer to bytes */
+ uInt m; /* number of marker bytes found in a row */
+ uLong r, w; /* temporaries to save total_in and total_out */
+
+ /* set up */
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->mode != BAD)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0;
+ }
+ if ((n = z->avail_in) == 0)
+ return Z_BUF_ERROR;
+ p = z->next_in;
+ m = z->state->sub.marker;
+
+ /* search */
+ while (n && m < 4)
+ {
+ if (*p == (Byte)(m < 2 ? 0 : 0xff))
+ m++;
+ else if (*p)
+ m = 0;
+ else
+ m = 4 - m;
+ p++, n--;
+ }
+
+ /* restore */
+ z->total_in += p - z->next_in;
+ z->next_in = p;
+ z->avail_in = n;
+ z->state->sub.marker = m;
+
+ /* return no joy or set up to restart on a new block */
+ if (m != 4)
+ return Z_DATA_ERROR;
+ r = z->total_in; w = z->total_out;
+ inflateReset(z);
+ z->total_in = r; z->total_out = w;
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
+
+#undef NEEDBYTE
+#undef NEXTBYTE
+/* --- inflate.c */
+
+/* +++ infblock.c */
+/* infblock.c -- interpret and process block types to last block
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include "zutil.h" */
+/* #include "infblock.h" */
+
+/* +++ inftrees.h */
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model). */
+
+typedef struct inflate_huft_s FAR inflate_huft;
+
+struct inflate_huft_s {
+ union {
+ struct {
+ Byte Exop; /* number of extra bits or operation */
+ Byte Bits; /* number of bits in this code or subcode */
+ } what;
+ Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
+ } word; /* 16-bit, 8 bytes for 32-bit machines) */
+ union {
+ uInt Base; /* literal, length base, or distance base */
+ inflate_huft *Next; /* pointer to next level of table */
+ } more;
+};
+
+#ifdef DEBUG_ZLIB
+ extern uInt inflate_hufts;
+#endif
+
+extern int inflate_trees_bits OF((
+ uIntf *, /* 19 code lengths */
+ uIntf *, /* bits tree desired/actual depth */
+ inflate_huft * FAR *, /* bits tree result */
+ z_streamp )); /* for zalloc, zfree functions */
+
+extern int inflate_trees_dynamic OF((
+ uInt, /* number of literal/length codes */
+ uInt, /* number of distance codes */
+ uIntf *, /* that many (total) code lengths */
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *, /* distance tree result */
+ z_streamp )); /* for zalloc, zfree functions */
+
+extern int inflate_trees_fixed OF((
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *)); /* distance tree result */
+
+extern int inflate_trees_free OF((
+ inflate_huft *, /* tables to free */
+ z_streamp )); /* for zfree function */
+
+/* --- inftrees.h */
+
+/* +++ infcodes.h */
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state FAR inflate_codes_statef;
+
+extern inflate_codes_statef *inflate_codes_new OF((
+ uInt, uInt,
+ inflate_huft *, inflate_huft *,
+ z_streamp ));
+
+extern int inflate_codes OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+extern void inflate_codes_free OF((
+ inflate_codes_statef *,
+ z_streamp ));
+
+/* --- infcodes.h */
+
+/* +++ infutil.h */
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFUTIL_H
+#define _INFUTIL_H
+
+typedef enum {
+ TYPE, /* get type bits (3, including end bit) */
+ LENS, /* get lengths for stored */
+ STORED, /* processing stored block */
+ TABLE, /* get table lengths */
+ BTREE, /* get bit lengths tree for a dynamic block */
+ DTREE, /* get length, distance trees for a dynamic block */
+ CODES, /* processing fixed or dynamic block */
+ DRY, /* output remaining window bytes */
+ DONEB, /* finished last block, done */
+ BADB} /* got a data error--stuck here */
+inflate_block_mode;
+
+/* inflate blocks semi-private state */
+struct inflate_blocks_state {
+
+ /* mode */
+ inflate_block_mode mode; /* current inflate_block mode */
+
+ /* mode dependent information */
+ union {
+ uInt left; /* if STORED, bytes left to copy */
+ struct {
+ uInt table; /* table lengths (14 bits) */
+ uInt index; /* index into blens (or border) */
+ uIntf *blens; /* bit lengths of codes */
+ uInt bb; /* bit length tree depth */
+ inflate_huft *tb; /* bit length decoding tree */
+ } trees; /* if DTREE, decoding info for trees */
+ struct {
+ inflate_huft *tl;
+ inflate_huft *td; /* trees to free */
+ inflate_codes_statef
+ *codes;
+ } decode; /* if CODES, current state */
+ } sub; /* submode */
+ uInt last; /* true if this block is the last block */
+
+ /* mode independent information */
+ uInt bitk; /* bits in bit buffer */
+ uLong bitb; /* bit buffer */
+ Bytef *window; /* sliding window */
+ Bytef *end; /* one byte after sliding window */
+ Bytef *read; /* window read pointer */
+ Bytef *write; /* window write pointer */
+ check_func checkfn; /* check function */
+ uLong check; /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/* update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+#define LEAVE {UPDATE return inflate_flush(s,z,r);}
+/* get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/* output bytes */
+#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
+#define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
+#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
+#define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/* load local pointers */
+#define LOAD {LOADIN LOADOUT}
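+
+/* WAVAIL above is the free space in the circular window: when the write
+ * pointer q is behind the read pointer, the writer may fill up to
+ * read-1 (never overtaking the reader); otherwise it may write to the
+ * end of the window and WWRAP wraps q back to the start.
+ */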
+
+/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
+extern uInt inflate_mask[17];
+
+/* copy as much as possible from the sliding window to the output area */
+extern int inflate_flush OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+#ifndef NO_DUMMY_DECL
+struct internal_state {int dummy;}; /* for buggy compilers */
+#endif
+
+#endif
+/* --- infutil.h */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* Table for deflate from PKZIP's appnote.txt. */
+local const uInt border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ * zero distance codes, which are sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+ * defined for them. Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+ 8. The five bits following the block type are really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm.
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
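+
+/* A worked reading of notes 8 and 11 above, as used by the TABLE state
+   below: the 14 bits that follow a dynamic block type are, low bits first,
+
+     5 bits  HLIT  = number of literal/length codes - 257   (t & 0x1f)
+     5 bits  HDIST = number of distance codes - 1           ((t >> 5) & 0x1f)
+     4 bits  HCLEN = number of bit length codes - 4         (t >> 10)
+
+   so 258 + (t & 0x1f) + ((t >> 5) & 0x1f) literal/length and distance code
+   lengths follow the HCLEN bit length code lengths, which is the expression
+   computed in the TABLE state.
+ */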
+
+
+void inflate_blocks_reset(s, z, c)
+inflate_blocks_statef *s;
+z_streamp z;
+uLongf *c;
+{
+ if (s->checkfn != Z_NULL)
+ *c = s->check;
+ if (s->mode == BTREE || s->mode == DTREE)
+ ZFREE(z, s->sub.trees.blens);
+ if (s->mode == CODES)
+ {
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ }
+ s->mode = TYPE;
+ s->bitk = 0;
+ s->bitb = 0;
+ s->read = s->write = s->window;
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0);
+ Trace((stderr, "inflate: blocks reset\n"));
+}
+
+
+inflate_blocks_statef *inflate_blocks_new(z, c, w)
+z_streamp z;
+check_func c;
+uInt w;
+{
+ inflate_blocks_statef *s;
+
+ if ((s = (inflate_blocks_statef *)ZALLOC
+ (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
+ return s;
+ if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
+ {
+ ZFREE(z, s);
+ return Z_NULL;
+ }
+ s->end = s->window + w;
+ s->checkfn = c;
+ s->mode = TYPE;
+ Trace((stderr, "inflate: blocks allocated\n"));
+ inflate_blocks_reset(s, z, &s->check);
+ return s;
+}
+
+
+#ifdef DEBUG_ZLIB
+ extern uInt inflate_hufts;
+#endif
+int inflate_blocks(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt t; /* temporary storage */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input based on current state */
+ while (1) switch (s->mode)
+ {
+ case TYPE:
+ NEEDBITS(3)
+ t = (uInt)b & 7;
+ s->last = t & 1;
+ switch (t >> 1)
+ {
+ case 0: /* stored */
+ Trace((stderr, "inflate: stored block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ t = k & 7; /* go to byte boundary */
+ DUMPBITS(t)
+ s->mode = LENS; /* get length of stored block */
+ break;
+ case 1: /* fixed */
+ Trace((stderr, "inflate: fixed codes block%s\n",
+ s->last ? " (last)" : ""));
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+
+ inflate_trees_fixed(&bl, &bd, &tl, &td);
+ s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
+ if (s->sub.decode.codes == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.decode.tl = Z_NULL; /* don't try to free these */
+ s->sub.decode.td = Z_NULL;
+ }
+ DUMPBITS(3)
+ s->mode = CODES;
+ break;
+ case 2: /* dynamic */
+ Trace((stderr, "inflate: dynamic codes block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ s->mode = TABLE;
+ break;
+ case 3: /* illegal */
+ DUMPBITS(3)
+ s->mode = BADB;
+ z->msg = (char*)"invalid block type";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ break;
+ case LENS:
+ NEEDBITS(32)
+ if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
+ {
+ s->mode = BADB;
+ z->msg = (char*)"invalid stored block lengths";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ s->sub.left = (uInt)b & 0xffff;
+ b = k = 0; /* dump bits */
+ Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
+ s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
+ break;
+ case STORED:
+ if (n == 0)
+ LEAVE
+ NEEDOUT
+ t = s->sub.left;
+ if (t > n) t = n;
+ if (t > m) t = m;
+ zmemcpy(q, p, t);
+ p += t; n -= t;
+ q += t; m -= t;
+ if ((s->sub.left -= t) != 0)
+ break;
+ Tracev((stderr, "inflate: stored end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ s->mode = s->last ? DRY : TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14)
+ s->sub.trees.table = t = (uInt)b & 0x3fff;
+#ifndef PKZIP_BUG_WORKAROUND
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+ {
+ s->mode = BADB;
+ z->msg = (char*)"too many length or distance symbols";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+#endif
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (t < 19)
+ t = 19;
+ if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ DUMPBITS(14)
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: table sizes ok\n"));
+ s->mode = BTREE;
+ case BTREE:
+ while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
+ {
+ NEEDBITS(3)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
+ DUMPBITS(3)
+ }
+ while (s->sub.trees.index < 19)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
+ s->sub.trees.bb = 7;
+ t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
+ &s->sub.trees.tb, z);
+ if (t != Z_OK)
+ {
+ ZFREE(z, s->sub.trees.blens);
+ r = t;
+ if (r == Z_DATA_ERROR)
+ s->mode = BADB;
+ LEAVE
+ }
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: bits tree ok\n"));
+ s->mode = DTREE;
+ case DTREE:
+ while (t = s->sub.trees.table,
+ s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
+ {
+ inflate_huft *h;
+ uInt i, j, c;
+
+ t = s->sub.trees.bb;
+ NEEDBITS(t)
+ h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
+ t = h->word.what.Bits;
+ c = h->more.Base;
+ if (c < 16)
+ {
+ DUMPBITS(t)
+ s->sub.trees.blens[s->sub.trees.index++] = c;
+ }
+ else /* c == 16..18 */
+ {
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+ NEEDBITS(t + i)
+ DUMPBITS(t)
+ j += (uInt)b & inflate_mask[i];
+ DUMPBITS(i)
+ i = s->sub.trees.index;
+ t = s->sub.trees.table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
+ (c == 16 && i < 1))
+ {
+ inflate_trees_free(s->sub.trees.tb, z);
+ ZFREE(z, s->sub.trees.blens);
+ s->mode = BADB;
+ z->msg = (char*)"invalid bit length repeat";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
+ do {
+ s->sub.trees.blens[i++] = c;
+ } while (--j);
+ s->sub.trees.index = i;
+ }
+ }
+ inflate_trees_free(s->sub.trees.tb, z);
+ s->sub.trees.tb = Z_NULL;
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+ inflate_codes_statef *c;
+
+ bl = 9; /* must be <= 9 for lookahead assumptions */
+ bd = 6; /* must be <= 9 for lookahead assumptions */
+ t = s->sub.trees.table;
+#ifdef DEBUG_ZLIB
+ inflate_hufts = 0;
+#endif
+ t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
+ s->sub.trees.blens, &bl, &bd, &tl, &td, z);
+ ZFREE(z, s->sub.trees.blens);
+ if (t != Z_OK)
+ {
+ if (t == (uInt)Z_DATA_ERROR)
+ s->mode = BADB;
+ r = t;
+ LEAVE
+ }
+ Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n",
+ inflate_hufts, sizeof(inflate_huft)));
+ if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
+ {
+ inflate_trees_free(td, z);
+ inflate_trees_free(tl, z);
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.decode.codes = c;
+ s->sub.decode.tl = tl;
+ s->sub.decode.td = td;
+ }
+ s->mode = CODES;
+ case CODES:
+ UPDATE
+ if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
+ return inflate_flush(s, z, r);
+ r = Z_OK;
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ LOAD
+ Tracev((stderr, "inflate: codes end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ if (!s->last)
+ {
+ s->mode = TYPE;
+ break;
+ }
+ if (k > 7) /* return unused byte, if any */
+ {
+ Assert(k < 16, "inflate_codes grabbed too many bytes")
+ k -= 8;
+ n++;
+ p--; /* can always return one */
+ }
+ s->mode = DRY;
+ case DRY:
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ s->mode = DONEB;
+ case DONEB:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADB:
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+int inflate_blocks_free(s, z, c)
+inflate_blocks_statef *s;
+z_streamp z;
+uLongf *c;
+{
+ inflate_blocks_reset(s, z, c);
+ ZFREE(z, s->window);
+ ZFREE(z, s);
+ Trace((stderr, "inflate: blocks freed\n"));
+ return Z_OK;
+}
+
+
+void inflate_set_dictionary(s, d, n)
+inflate_blocks_statef *s;
+const Bytef *d;
+uInt n;
+{
+ zmemcpy((charf *)s->window, d, n);
+ s->read = s->write = s->window + n;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+int inflate_addhistory(s, z)
+inflate_blocks_statef *s;
+z_stream *z;
+{
+ uLong b; /* bit buffer */ /* NOT USED HERE */
+ uInt k; /* bits in bit buffer */ /* NOT USED HERE */
+ uInt t; /* temporary storage */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ if (s->read != s->write)
+ return Z_STREAM_ERROR;
+ if (s->mode != TYPE)
+ return Z_DATA_ERROR;
+
+ /* we're ready to rock */
+ LOAD
+ /* while there is input ready, copy to output buffer, moving
+ * pointers as needed.
+ */
+ while (n) {
+ t = n; /* how many to do */
+ /* is there room until end of buffer? */
+ if (t > m) t = m;
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, t);
+ zmemcpy(q, p, t);
+ q += t;
+ p += t;
+ n -= t;
+ z->total_out += t;
+ s->read = q; /* drag read pointer forward */
+/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */
+ if (q == s->end) {
+ s->read = q = s->window;
+ m = WAVAIL;
+ }
+ }
+ UPDATE
+ return Z_OK;
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
+int inflate_packet_flush(s)
+ inflate_blocks_statef *s;
+{
+ if (s->mode != LENS)
+ return Z_DATA_ERROR;
+ s->mode = TYPE;
+ return Z_OK;
+}
+/* --- infblock.c */
+
+/* +++ inftrees.c */
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include "zutil.h" */
+/* #include "inftrees.h" */
+
+char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+#ifndef NO_DUMMY_DECL
+struct internal_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+
+local int huft_build OF((
+ uIntf *, /* code lengths in bits */
+ uInt, /* number of codes */
+ uInt, /* number of "simple" codes */
+ const uIntf *, /* list of base values for non-simple codes */
+ const uIntf *, /* list of extra bits for non-simple codes */
+ inflate_huft * FAR*,/* result: starting table */
+ uIntf *, /* maximum lookup bits (returns actual) */
+ z_streamp )); /* for zalloc function */
+
+local voidpf falloc OF((
+ voidpf, /* opaque pointer (not used) */
+ uInt, /* number of items */
+ uInt)); /* size of item */
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* see note #13 above about 258 */
+local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
+local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+local const uInt cpdext[30] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+ The results of this trade-off are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
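+
+/* A sketch of one decode step implied by the description above, using the
+   same names as inflate_codes() further below (reading aid only):
+
+     t = tree + ((uInt)b & inflate_mask[need]);   index by the next `need' bits
+     DUMPBITS(t->bits)                            drop the bits actually used
+     if (t->exop == 0)         the entry is a literal, value in t->base
+     else if (t->exop & 16)    a length/distance base in t->base, with
+                               (t->exop & 15) extra bits still to be read
+     else if (t->exop & 32)    end-of-block (literal/length tree only)
+     else if (!(t->exop & 64)) not resolved yet: descend to the second-level
+                               table at t->next and repeat with need = t->exop
+ */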
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
+#define BMAX 15 /* maximum bit length of any code */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+#ifdef DEBUG_ZLIB
+ uInt inflate_hufts;
+#endif
+
+local int huft_build(b, n, s, d, e, t, m, zs)
+uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
+uInt n; /* number of codes (assumed <= N_MAX) */
+uInt s; /* number of simple-valued codes (0..s-1) */
+const uIntf *d; /* list of base values for non-simple codes */
+const uIntf *e; /* list of extra bits for non-simple codes */
+inflate_huft * FAR *t; /* result: starting table */
+uIntf *m; /* maximum lookup bits, returns actual */
+z_streamp zs; /* for zalloc function */
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ if the given code set is incomplete (the tables are still built in this
+ case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
+ lengths), or Z_MEM_ERROR if not enough memory. */
+{
+
+ uInt a; /* counter for codes of length k */
+ uInt c[BMAX+1]; /* bit length count table */
+ uInt f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register uInt i; /* counter, current code */
+ register uInt j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register uIntf *p; /* pointer into c[], b[], or v[] */
+ inflate_huft *q; /* points to current table */
+ struct inflate_huft_s r; /* table entry for structure assignment */
+ inflate_huft *u[BMAX]; /* table stack */
+ uInt v[N_MAX]; /* values in order of bit length */
+ register int w; /* bits before this table == (l * h) */
+ uInt x[BMAX+1]; /* bit offsets, then code stack */
+ uIntf *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ uInt z; /* number of entries in current table */
+
+
+ /* Generate counts for each bit length */
+ p = c;
+#define C0 *p++ = 0;
+#define C2 C0 C0 C0 C0
+#define C4 C2 C2 C2 C2
+ C4 /* clear c[]--assume BMAX+1 is 16 */
+ p = b; i = n;
+ do {
+ c[*p++]++; /* assume all entries <= BMAX */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (inflate_huft *)Z_NULL;
+ *m = 0;
+ return Z_OK;
+ }
+
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((uInt)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((uInt)l > i)
+ l = i;
+ *m = l;
+
+
+ /* Adjust last length count to fill out codes, if needed */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0)
+ return Z_DATA_ERROR;
+ if ((y -= c[i]) < 0)
+ return Z_DATA_ERROR;
+ c[i] += y;
+
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+ n = x[g]; /* set n to length of v */
+
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
+ q = (inflate_huft *)Z_NULL; /* ditto */
+ z = 0; /* ditto */
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a--)
+ {
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = g - w;
+ z = z > (uInt)l ? l : z; /* table size upper limit */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ if ((q = (inflate_huft *)ZALLOC
+ (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
+ {
+ if (h)
+ inflate_trees_free(u[0], zs);
+ return Z_MEM_ERROR; /* not enough memory */
+ }
+#ifdef DEBUG_ZLIB
+ inflate_hufts += z + 1;
+#endif
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->next)) = Z_NULL;
+ u[h] = ++q; /* table starts after link */
+
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.bits = (Byte)l; /* bits to dump before this table */
+ r.exop = (Byte)j; /* bits in this table */
+ r.next = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h-1][j] = r; /* connect to last table */
+ }
+ }
+
+ /* set up table entry in r */
+ r.bits = (Byte)(k - w);
+ if (p >= v + n)
+ r.exop = 128 + 64; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
+ r.base = *p++; /* simple code is just the value */
+ }
+ else
+ {
+ r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
+ r.base = d[*p++ - s];
+ }
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+ }
+ }
+
+
+ /* Return Z_BUF_ERROR if we were given an incomplete table */
+ return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+}
+
+
+int inflate_trees_bits(c, bb, tb, z)
+uIntf *c; /* 19 code lengths */
+uIntf *bb; /* bits tree desired/actual depth */
+inflate_huft * FAR *tb; /* bits tree result */
+z_streamp z; /* for zfree function */
+{
+ int r;
+
+ r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed dynamic bit lengths tree";
+ else if (r == Z_BUF_ERROR || *bb == 0)
+ {
+ inflate_trees_free(*tb, z);
+ z->msg = (char*)"incomplete dynamic bit lengths tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+}
+
+
+int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
+uInt nl; /* number of literal/length codes */
+uInt nd; /* number of distance codes */
+uIntf *c; /* that many (total) code lengths */
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+z_streamp z; /* for zfree function */
+{
+ int r;
+
+ /* build literal/length tree */
+ r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z);
+ if (r != Z_OK || *bl == 0)
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed literal/length tree";
+ else if (r != Z_MEM_ERROR)
+ {
+ inflate_trees_free(*tl, z);
+ z->msg = (char*)"incomplete literal/length tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+ }
+
+ /* build distance tree */
+ r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z);
+ if (r != Z_OK || (*bd == 0 && nl > 257))
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed distance tree";
+ else if (r == Z_BUF_ERROR) {
+#ifdef PKZIP_BUG_WORKAROUND
+ r = Z_OK;
+ }
+#else
+ inflate_trees_free(*td, z);
+ z->msg = (char*)"incomplete distance tree";
+ r = Z_DATA_ERROR;
+ }
+ else if (r != Z_MEM_ERROR)
+ {
+ z->msg = (char*)"empty distance tree with lengths";
+ r = Z_DATA_ERROR;
+ }
+ inflate_trees_free(*tl, z);
+ return r;
+#endif
+ }
+
+ /* done */
+ return Z_OK;
+}
+
+
+/* build fixed tables only once--keep them here */
+local int fixed_built = 0;
+#define FIXEDH 530 /* number of hufts used by fixed tables */
+local inflate_huft fixed_mem[FIXEDH];
+local uInt fixed_bl;
+local uInt fixed_bd;
+local inflate_huft *fixed_tl;
+local inflate_huft *fixed_td;
+
+
+local voidpf falloc(q, n, s)
+voidpf q; /* opaque pointer */
+uInt n; /* number of items */
+uInt s; /* size of item */
+{
+ Assert(s == sizeof(inflate_huft) && n <= *(intf *)q,
+ "inflate_trees falloc overflow");
+ *(intf *)q -= n+s-s; /* s-s to avoid warning */
+ return (voidpf)(fixed_mem + *(intf *)q);
+}
+
+
+int inflate_trees_fixed(bl, bd, tl, td)
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+{
+ /* build fixed tables if not already (multiple overlapped executions ok) */
+ if (!fixed_built)
+ {
+ int k; /* temporary variable */
+ unsigned c[288]; /* length list for huft_build */
+ z_stream z; /* for falloc function */
+ int f = FIXEDH; /* number of hufts left in fixed_mem */
+
+ /* set up fake z_stream for memory routines */
+ z.zalloc = falloc;
+ z.zfree = Z_NULL;
+ z.opaque = (voidpf)&f;
+
+ /* literal table */
+ for (k = 0; k < 144; k++)
+ c[k] = 8;
+ for (; k < 256; k++)
+ c[k] = 9;
+ for (; k < 280; k++)
+ c[k] = 7;
+ for (; k < 288; k++)
+ c[k] = 8;
+ fixed_bl = 7;
+ huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
+
+ /* distance table */
+ for (k = 0; k < 30; k++)
+ c[k] = 5;
+ fixed_bd = 5;
+ huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
+
+ /* done */
+ Assert(f == 0, "invalid build of fixed tables");
+ fixed_built = 1;
+ }
+ *bl = fixed_bl;
+ *bd = fixed_bd;
+ *tl = fixed_tl;
+ *td = fixed_td;
+ return Z_OK;
+}
+
+
+int inflate_trees_free(t, z)
+inflate_huft *t; /* table to free */
+z_streamp z; /* for zfree function */
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+ list of the tables it made, with the links in a dummy first entry of
+ each table. */
+{
+ register inflate_huft *p, *q, *r;
+
+ /* Reverse linked list */
+ p = Z_NULL;
+ q = t;
+ while (q != Z_NULL)
+ {
+ r = (q - 1)->next;
+ (q - 1)->next = p;
+ p = q;
+ q = r;
+ }
+ /* Go through linked list, freeing from the malloced (t[-1]) address. */
+ while (p != Z_NULL)
+ {
+ q = (--p)->next;
+ ZFREE(z,p);
+ p = q;
+ }
+ return Z_OK;
+}
+/* --- inftrees.c */
+
+/* +++ infcodes.c */
+/* infcodes.c -- process literals and length/distance pairs
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include "zutil.h" */
+/* #include "inftrees.h" */
+/* #include "infblock.h" */
+/* #include "infcodes.h" */
+/* #include "infutil.h" */
+
+/* +++ inffast.h */
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+extern int inflate_fast OF((
+ uInt,
+ uInt,
+ inflate_huft *,
+ inflate_huft *,
+ inflate_blocks_statef *,
+ z_streamp ));
+/* --- inffast.h */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* inflate codes private state */
+struct inflate_codes_state {
+
+ /* mode */
+ enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ START, /* x: set up for LEN */
+ LEN, /* i: get length/literal/eob next */
+ LENEXT, /* i: getting length extra (have base) */
+ DIST, /* i: get distance next */
+ DISTEXT, /* i: getting distance extra */
+ COPY, /* o: copying bytes in window, waiting for space */
+ LIT, /* o: got literal, waiting for output space */
+ WASH, /* o: got eob, possibly still output waiting */
+ END, /* x: got eob and all data flushed */
+ BADCODE} /* x: got error */
+ mode; /* current inflate_codes mode */
+
+ /* mode dependent information */
+ uInt len;
+ union {
+ struct {
+ inflate_huft *tree; /* pointer into tree */
+ uInt need; /* bits needed */
+ } code; /* if LEN or DIST, where in tree */
+ uInt lit; /* if LIT, literal */
+ struct {
+ uInt get; /* bits to get for extra */
+ uInt dist; /* distance back to copy from */
+ } copy; /* if EXT or COPY, where and how much */
+ } sub; /* submode */
+
+ /* mode independent information */
+ Byte lbits; /* ltree bits decoded per branch */
+ Byte dbits; /* dtree bits decoded per branch */
+ inflate_huft *ltree; /* literal/length/eob tree */
+ inflate_huft *dtree; /* distance tree */
+
+};
+
+
+inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
+uInt bl, bd;
+inflate_huft *tl;
+inflate_huft *td; /* need separate declaration for Borland C++ */
+z_streamp z;
+{
+ inflate_codes_statef *c;
+
+ if ((c = (inflate_codes_statef *)
+ ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
+ {
+ c->mode = START;
+ c->lbits = (Byte)bl;
+ c->dbits = (Byte)bd;
+ c->ltree = tl;
+ c->dtree = td;
+ Tracev((stderr, "inflate: codes new\n"));
+ }
+ return c;
+}
+
+
+int inflate_codes(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt j; /* temporary storage */
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ Bytef *f; /* pointer to copy strings from */
+ inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input and output based on current state */
+ while (1) switch (c->mode)
+ { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ case START: /* x: set up for LEN */
+#ifndef SLOW
+ if (m >= 258 && n >= 10)
+ {
+ UPDATE
+ r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
+ LOAD
+ if (r != Z_OK)
+ {
+ c->mode = r == Z_STREAM_END ? WASH : BADCODE;
+ break;
+ }
+ }
+#endif /* !SLOW */
+ c->sub.code.need = c->lbits;
+ c->sub.code.tree = c->ltree;
+ c->mode = LEN;
+ case LEN: /* i: get length/literal/eob next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e == 0) /* literal */
+ {
+ c->sub.lit = t->base;
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", t->base));
+ c->mode = LIT;
+ break;
+ }
+ if (e & 16) /* length */
+ {
+ c->sub.copy.get = e & 15;
+ c->len = t->base;
+ c->mode = LENEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ if (e & 32) /* end of block */
+ {
+ Tracevv((stderr, "inflate: end of block\n"));
+ c->mode = WASH;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = (char*)"invalid literal/length code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case LENEXT: /* i: getting length extra (have base) */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->len += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ c->sub.code.need = c->dbits;
+ c->sub.code.tree = c->dtree;
+ Tracevv((stderr, "inflate: length %u\n", c->len));
+ c->mode = DIST;
+ case DIST: /* i: get distance next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e & 16) /* distance */
+ {
+ c->sub.copy.get = e & 15;
+ c->sub.copy.dist = t->base;
+ c->mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = (char*)"invalid distance code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case DISTEXT: /* i: getting distance extra */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->sub.copy.dist += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
+ c->mode = COPY;
+ case COPY: /* o: copying bytes in window, waiting for space */
+#ifndef __TURBOC__ /* Turbo C bug for following expression */
+ f = (uInt)(q - s->window) < c->sub.copy.dist ?
+ s->end - (c->sub.copy.dist - (q - s->window)) :
+ q - c->sub.copy.dist;
+#else
+ f = q - c->sub.copy.dist;
+ if ((uInt)(q - s->window) < c->sub.copy.dist)
+ f = s->end - (c->sub.copy.dist - (uInt)(q - s->window));
+#endif
+ while (c->len)
+ {
+ NEEDOUT
+ OUTBYTE(*f++)
+ if (f == s->end)
+ f = s->window;
+ c->len--;
+ }
+ c->mode = START;
+ break;
+ case LIT: /* o: got literal, waiting for output space */
+ NEEDOUT
+ OUTBYTE(c->sub.lit)
+ c->mode = START;
+ break;
+ case WASH: /* o: got eob, possibly more output */
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ c->mode = END;
+ case END:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADCODE: /* x: got error */
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+void inflate_codes_free(c, z)
+inflate_codes_statef *c;
+z_streamp z;
+{
+ ZFREE(z, c);
+ Tracev((stderr, "inflate: codes free\n"));
+}
+/* --- infcodes.c */
+
+/* +++ infutil.c */
+/* inflate_util.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include "zutil.h" */
+/* #include "infblock.h" */
+/* #include "inftrees.h" */
+/* #include "infcodes.h" */
+/* #include "infutil.h" */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* And'ing with mask[n] masks the lower n bits */
+uInt inflate_mask[17] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+
+/* copy as much as possible from the sliding window to the output area */
+int inflate_flush(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt n;
+ Bytef *p;
+ Bytef *q;
+
+ /* local copies of source and destination pointers */
+ p = z->next_out;
+ q = s->read;
+
+ /* compute number of bytes to copy as far as end of window */
+ n = (uInt)((q <= s->write ? s->write : s->end) - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy as far as end of window */
+ if (p != Z_NULL) {
+ zmemcpy(p, q, n);
+ p += n;
+ }
+ q += n;
+
+ /* see if more to copy at beginning of window */
+ if (q == s->end)
+ {
+ /* wrap pointers */
+ q = s->window;
+ if (s->write == s->end)
+ s->write = s->window;
+
+ /* compute bytes to copy */
+ n = (uInt)(s->write - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy */
+ if (p != Z_NULL) {
+ zmemcpy(p, q, n);
+ p += n;
+ }
+ q += n;
+ }
+
+ /* update pointers */
+ z->next_out = p;
+ s->read = q;
+
+ /* done */
+ return r;
+}
+/* --- infutil.c */
+
+/* +++ inffast.c */
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include "zutil.h" */
+/* #include "inftrees.h" */
+/* #include "infblock.h" */
+/* #include "infcodes.h" */
+/* #include "infutil.h" */
+/* #include "inffast.h" */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
+
+/* Called with number of bytes left to write in window at least 258
+ (the maximum string length) and number of input bytes available
+ at least ten. The ten bytes are six bytes for the longest length/
+ distance pair plus four bytes for overloading the bit buffer. */
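+/* (The six-byte figure is the worst case allowed by the format: a
+   literal/length code of up to 15 bits plus up to 5 extra length bits, and
+   a distance code of up to 15 bits plus up to 13 extra distance bits, i.e.
+   15+5+15+13 = 48 bits = 6 bytes; the other four bytes are the allowance
+   for bytes GRABBITS pulls into the bit buffer ahead of need, which UNGRAB
+   hands back on exit.) */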
+
+int inflate_fast(bl, bd, tl, td, s, z)
+uInt bl, bd;
+inflate_huft *tl;
+inflate_huft *td; /* need separate declaration for Borland C++ */
+inflate_blocks_statef *s;
+z_streamp z;
+{
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ uInt ml; /* mask for literal/length tree */
+ uInt md; /* mask for distance tree */
+ uInt c; /* bytes to copy */
+ uInt d; /* distance back to copy from */
+ Bytef *r; /* copy source pointer */
+
+ /* load input, output, bit values */
+ LOAD
+
+ /* initialize masks */
+ ml = inflate_mask[bl];
+ md = inflate_mask[bd];
+
+ /* do until not enough input or output space for fast loop */
+ do { /* assume called with m >= 258 && n >= 10 */
+ /* get literal/length code */
+ GRABBITS(20) /* max bits for literal/length code */
+ if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ continue;
+ }
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits for length */
+ e &= 15;
+ c = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv((stderr, "inflate: * length %u\n", c));
+
+ /* decode distance base of block to copy */
+ GRABBITS(15); /* max bits for distance code */
+ e = (t = td + ((uInt)b & md))->exop;
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits to add to distance base */
+ e &= 15;
+ GRABBITS(e) /* get extra bits (up to 13) */
+ d = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv((stderr, "inflate: * distance %u\n", d));
+
+ /* do the copy */
+ m -= c;
+ if ((uInt)(q - s->window) >= d) /* offset before dest */
+ { /* just copy */
+ r = q - d;
+ *q++ = *r++; c--; /* minimum count is three, */
+ *q++ = *r++; c--; /* so unroll loop a little */
+ }
+ else /* else offset after destination */
+ {
+ e = d - (uInt)(q - s->window); /* bytes from offset to end */
+ r = s->end - e; /* pointer to offset */
+ if (c > e) /* if source crosses, */
+ {
+ c -= e; /* copy to end of window */
+ do {
+ *q++ = *r++;
+ } while (--e);
+ r = s->window; /* copy rest from start of window */
+ }
+ }
+ do { /* copy all or what's left */
+ *q++ = *r++;
+ } while (--c);
+ break;
+ }
+ else if ((e & 64) == 0)
+ e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
+ else
+ {
+ z->msg = (char*)"invalid distance code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ break;
+ }
+ if ((e & 64) == 0)
+ {
+ if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ break;
+ }
+ }
+ else if (e & 32)
+ {
+ Tracevv((stderr, "inflate: * end of block\n"));
+ UNGRAB
+ UPDATE
+ return Z_STREAM_END;
+ }
+ else
+ {
+ z->msg = (char*)"invalid literal/length code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ } while (m >= 258 && n >= 10);
+
+ /* not enough input or output--restore pointers and return */
+ UNGRAB
+ UPDATE
+ return Z_OK;
+}
+/* --- inffast.c */
+
+/* +++ zutil.c */
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */
+
+#ifdef DEBUG_ZLIB
+#include <stdio.h>
+#endif
+
+/* #include "zutil.h" */
+
+#ifndef NO_DUMMY_DECL
+struct internal_state {int dummy;}; /* for buggy compilers */
+#endif
+
+#ifndef STDC
+extern void exit OF((int));
+#endif
+
+static const char *z_errmsg[10] = {
+"need dictionary", /* Z_NEED_DICT 2 */
+"stream end", /* Z_STREAM_END 1 */
+"", /* Z_OK 0 */
+"file error", /* Z_ERRNO (-1) */
+"stream error", /* Z_STREAM_ERROR (-2) */
+"data error", /* Z_DATA_ERROR (-3) */
+"insufficient memory", /* Z_MEM_ERROR (-4) */
+"buffer error", /* Z_BUF_ERROR (-5) */
+"incompatible version",/* Z_VERSION_ERROR (-6) */
+""};
+
+
+const char *zlibVersion()
+{
+ return ZLIB_VERSION;
+}
+
+#ifdef DEBUG_ZLIB
+void z_error (m)
+ char *m;
+{
+ fprintf(stderr, "%s\n", m);
+ exit(1);
+}
+#endif
+
+#ifndef HAVE_MEMCPY
+
+void zmemcpy(dest, source, len)
+ Bytef* dest;
+ Bytef* source;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = *source++; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+
+int zmemcmp(s1, s2, len)
+ Bytef* s1;
+ Bytef* s2;
+ uInt len;
+{
+ uInt j;
+
+ for (j = 0; j < len; j++) {
+ if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
+ }
+ return 0;
+}
+
+void zmemzero(dest, len)
+ Bytef* dest;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = 0; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+#endif
+
+#ifdef __TURBOC__
+#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__)
+/* Small and medium models in Turbo C are for now limited to near allocation
+ * with reduced MAX_WBITS and MAX_MEM_LEVEL
+ */
+# define MY_ZCALLOC
+
+/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
+ * and farmalloc(64K) returns a pointer with an offset of 8, so we
+ * must fix the pointer. Warning: the pointer must be put back to its
+ * original form in order to free it, use zcfree().
+ */
+
+#define MAX_PTR 10
+/* 10*64K = 640K */
+
+local int next_ptr = 0;
+
+typedef struct ptr_table_s {
+ voidpf org_ptr;
+ voidpf new_ptr;
+} ptr_table;
+
+local ptr_table table[MAX_PTR];
+/* This table is used to remember the original form of pointers
+ * to large buffers (64K). Such pointers are normalized with a zero offset.
+ * Since MSDOS is not a preemptive multitasking OS, this table is not
+ * protected from concurrent access. This hack doesn't work anyway on
+ * a protected system like OS/2. Use Microsoft C instead.
+ */
+
+voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ voidpf buf = opaque; /* just to make some compilers happy */
+ ulg bsize = (ulg)items*size;
+
+ /* If we allocate less than 65520 bytes, we assume that farmalloc
+ * will return a usable pointer which doesn't have to be normalized.
+ */
+ if (bsize < 65520L) {
+ buf = farmalloc(bsize);
+ if (*(ush*)&buf != 0) return buf;
+ } else {
+ buf = farmalloc(bsize + 16L);
+ }
+ if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
+ table[next_ptr].org_ptr = buf;
+
+ /* Normalize the pointer to seg:0 */
+ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
+ *(ush*)&buf = 0;
+ table[next_ptr++].new_ptr = buf;
+ return buf;
+}
+
+void zcfree (voidpf opaque, voidpf ptr)
+{
+ int n;
+ if (*(ush*)&ptr != 0) { /* object < 64K */
+ farfree(ptr);
+ return;
+ }
+ /* Find the original pointer */
+ for (n = 0; n < next_ptr; n++) {
+ if (ptr != table[n].new_ptr) continue;
+
+ farfree(table[n].org_ptr);
+ while (++n < next_ptr) {
+ table[n-1] = table[n];
+ }
+ next_ptr--;
+ return;
+ }
+ ptr = opaque; /* just to make some compilers happy */
+ Assert(0, "zcfree: ptr not found");
+}
+#endif
+#endif /* __TURBOC__ */
+
+
+#if defined(M_I86) && !defined(__32BIT__)
+/* Microsoft C in 16-bit mode */
+
+# define MY_ZCALLOC
+
+#if (!defined(_MSC_VER) || (_MSC_VER < 600))
+# define _halloc halloc
+# define _hfree hfree
+#endif
+
+voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ return _halloc((long)items, size);
+}
+
+void zcfree (voidpf opaque, voidpf ptr)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ _hfree(ptr);
+}
+
+#endif /* MSC */
+
+
+#ifndef MY_ZCALLOC /* Any system without a special alloc function */
+
+#ifndef STDC
+extern voidp calloc OF((uInt items, uInt size));
+extern void free OF((voidpf ptr));
+#endif
+
+voidpf zcalloc (opaque, items, size)
+ voidpf opaque;
+ unsigned items;
+ unsigned size;
+{
+ if (opaque) items += size - size; /* make compiler happy */
+ return (voidpf)calloc(items, size);
+}
+
+void zcfree (opaque, ptr)
+ voidpf opaque;
+ voidpf ptr;
+{
+ free(ptr);
+ if (opaque) return; /* make compiler happy */
+}
+
+#endif /* MY_ZCALLOC */
+/* --- zutil.c */
+
+/* +++ adler32.c */
+/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */
+
+/* #include "zlib.h" */
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
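+/* (Check: with n = 5552, 255*5552*5553/2 + 5553*65520 = 3930857640 +
+   363832560 = 4294690200 <= 4294967295 = 2^32-1, while n = 5553 would give
+   4296171735 and overflow, so 5552 is indeed the largest such n.) */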
+
+#define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
+#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
+#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
+#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
+#define DO16(buf) DO8(buf,0); DO8(buf,8);
+
+/* ========================================================================= */
+uLong adler32(adler, buf, len)
+ uLong adler;
+ const Bytef *buf;
+ uInt len;
+{
+ unsigned long s1 = adler & 0xffff;
+ unsigned long s2 = (adler >> 16) & 0xffff;
+ int k;
+
+ if (buf == Z_NULL) return 1L;
+
+ while (len > 0) {
+ k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16) {
+ DO16(buf);
+ buf += 16;
+ k -= 16;
+ }
+ if (k != 0) do {
+ s1 += *buf++;
+ s2 += s1;
+ } while (--k);
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
+/* --- adler32.c */
diff --git a/sys/net/zlib.h b/sys/net/zlib.h
new file mode 100644
index 0000000..1f3ee50
--- /dev/null
+++ b/sys/net/zlib.h
@@ -0,0 +1,1013 @@
+/* $FreeBSD$ */
+
+/*
+ * This file is derived from zlib.h and zconf.h from the zlib-1.0.4
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets.
+ */
+
+/*
+ * ==FILEVERSION 971127==
+ *
+ * This marker is used by the Linux installation script to determine
+ * whether an up-to-date version of this file is already installed.
+ */
+
+
+/* +++ zlib.h */
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.0.4, Jul 24th, 1996.
+
+ Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ gzip@prep.ai.mit.edu madler@alumni.caltech.edu
+
+
+ The data format used by the zlib library is described by RFCs (Request for
+ Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
+ (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* +++ zconf.h */
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zconf.h,v 1.20 1996/07/02 15:09:28 me Exp $ */
+
+#ifndef _ZCONF_H
+#define _ZCONF_H
+
+/*
+ * If you *really* need a unique prefix for all types and library functions,
+ * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
+ */
+#ifdef Z_PREFIX
+# define deflateInit_ z_deflateInit_
+# define deflate z_deflate
+# define deflateEnd z_deflateEnd
+# define inflateInit_ z_inflateInit_
+# define inflate z_inflate
+# define inflateEnd z_inflateEnd
+# define deflateInit2_ z_deflateInit2_
+# define deflateSetDictionary z_deflateSetDictionary
+# define deflateCopy z_deflateCopy
+# define deflateReset z_deflateReset
+# define deflateParams z_deflateParams
+# define inflateInit2_ z_inflateInit2_
+# define inflateSetDictionary z_inflateSetDictionary
+# define inflateSync z_inflateSync
+# define inflateReset z_inflateReset
+# define compress z_compress
+# define uncompress z_uncompress
+# define adler32 z_adler32
+# define crc32 z_crc32
+# define get_crc_table z_get_crc_table
+
+# define Byte z_Byte
+# define uInt z_uInt
+# define uLong z_uLong
+# define Bytef z_Bytef
+# define charf z_charf
+# define intf z_intf
+# define uIntf z_uIntf
+# define uLongf z_uLongf
+# define voidpf z_voidpf
+# define voidp z_voidp
+#endif
+
+#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32)
+# define WIN32
+#endif
+#if defined(__GNUC__) || defined(WIN32) || defined(__386__) || defined(i386)
+# ifndef __32BIT__
+# define __32BIT__
+# endif
+#endif
+#if defined(__MSDOS__) && !defined(MSDOS)
+# define MSDOS
+#endif
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ */
+#if defined(MSDOS) && !defined(__32BIT__)
+# define MAXSEG_64K
+#endif
+#ifdef MSDOS
+# define UNALIGNED_OK
+#endif
+
+#if (defined(MSDOS) || defined(_WINDOWS) || defined(WIN32)) && !defined(STDC)
+# define STDC
+#endif
+#if (defined(__STDC__) || defined(__cplusplus)) && !defined(STDC)
+# define STDC
+#endif
+
+#ifndef STDC
+# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
+# define const
+# endif
+#endif
+
+/* Some Mac compilers merge all .h files incorrectly: */
+#if defined(__MWERKS__) || defined(applec) ||defined(THINK_C) ||defined(__SC__)
+# define NO_DUMMY_DECL
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ 1 << (windowBits+2) + 1 << (memLevel+9)
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+/* The following definitions for FAR are needed only for MSDOS mixed
+ * model programming (small or medium model with some far allocations).
+ * This was tested only with MSC; for other MSDOS compilers you may have
+ * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
+ * just define FAR to be empty.
+ */
+#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(__32BIT__)
+ /* MSC small or medium model */
+# define SMALL_MEDIUM
+# ifdef _MSC_VER
+# define FAR __far
+# else
+# define FAR far
+# endif
+#endif
+#if defined(__BORLANDC__) && (defined(__SMALL__) || defined(__MEDIUM__))
+# ifndef __32BIT__
+# define SMALL_MEDIUM
+# define FAR __far
+# endif
+#endif
+#ifndef FAR
+# define FAR
+#endif
+
+typedef unsigned char Byte; /* 8 bits */
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+#if defined(__BORLANDC__) && defined(SMALL_MEDIUM)
+ /* Borland C/C++ ignores FAR inside typedef */
+# define Bytef Byte FAR
+#else
+ typedef Byte FAR Bytef;
+#endif
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+
+/* Compile with -DZLIB_DLL for Windows DLL support */
+#if (defined(_WINDOWS) || defined(WINDOWS)) && defined(ZLIB_DLL)
+# include <windows.h>
+# define EXPORT WINAPI
+#else
+# define EXPORT
+#endif
+
+#endif /* _ZCONF_H */
+/* --- zconf.h */
+
+#define ZLIB_VERSION "1.0.4P"
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms may be added later and will have the same
+ stream interface.
+
+ For compression the application must provide the output buffer and
+ may optionally provide the input buffer for optimization. For decompression,
+ the application must provide the input buffer and may optionally provide
+ the output buffer for optimization.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The library does not install any signal handler. It is recommended to
+ add at least a handler for SIGSEGV when decompressing; the library checks
+ the consistency of the input data whenever possible but may go nuts
+ for some forms of corrupted input.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Bytef *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ const char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ alloc_func zalloc; /* used to allocate the internal state */
+ free_func zfree; /* used to free the internal state */
+ voidpf opaque; /* private data object passed to zalloc and zfree */
+
+ int data_type; /* best guess about the data type: ascii or binary */
+ uLong adler; /* adler32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream FAR *z_streamp;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
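+
+/*
+   A minimal sketch of the initialization described above, assuming the
+   default allocation functions are acceptable; source, source_len, dest and
+   dest_len stand for the caller's own buffers (illustration only):
+
+     z_stream zs;
+
+     zs.zalloc = (alloc_func)Z_NULL;    (the init function supplies defaults)
+     zs.zfree  = (free_func)Z_NULL;
+     zs.opaque = (voidpf)Z_NULL;
+     zs.next_in  = source;   zs.avail_in  = source_len;
+     zs.next_out = dest;     zs.avail_out = dest_len;
+
+   after which the appropriate init function (for example deflateInit,
+   described below) may be called.
+*/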
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1
+#define Z_PACKET_FLUSH 2
+#define Z_SYNC_FLUSH 3
+#define Z_FULL_FLUSH 4
+#define Z_FINISH 5
+/* Allowed flush values; see deflate() below for details */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_NEED_DICT 2
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+#define Z_VERSION_ERROR (-6)
+/* Return codes for the compression/decompression functions. Negative
+ * values are errors, positive values are used for special but normal events.
+ */
+
+#define Z_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+/* compression strategy; see deflateInit2() below for details */
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Possible values of the data_type field */
+
+#define Z_DEFLATED 8
+/* The deflate compression method (the only one supported in this version) */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+#define zlib_version zlibVersion()
+/* for compatibility with versions < 1.0.2 */
+
+ /* basic functions */
+
+extern const char * EXPORT zlibVersion OF((void));
+/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is
+ not compatible with the zlib.h header file used by the application.
+ This check is automatically made by deflateInit and inflateInit.
+ */
+
+/*
+extern int EXPORT deflateInit OF((z_streamp strm, int level));
+
+ Initializes the internal stream state for compression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller.
+ If zalloc and zfree are set to Z_NULL, deflateInit updates them to
+ use default allocation functions.
+
+ The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
+ 1 gives best speed, 9 gives best compression, 0 gives no compression at
+ all (the input data is simply copied a block at a time).
+ Z_DEFAULT_COMPRESSION requests a default compromise between speed and
+ compression (currently equivalent to level 6).
+
+ deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if level is not a valid compression level,
+ Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
+ with the version assumed by the caller (ZLIB_VERSION).
+ msg is set to null if there is no error message. deflateInit does not
+ perform any compression: this will be done by deflate().
+*/
+
+
+extern int EXPORT deflate OF((z_streamp strm, int flush));
+/*
+ Performs one or both of the following actions:
+
+ - Compress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in and avail_in are updated and
+ processing will resume at this point for the next call of deflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. This action is forced if the parameter flush is non-zero.
+ Forcing flush frequently degrades the compression ratio, so this parameter
+ should be set only when necessary (in interactive applications).
+ Some output may be provided even if flush is not set.
+
+ Before the call of deflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating avail_in or avail_out accordingly; avail_out
+ should never be zero before the call. The application can consume the
+ compressed output when it wants, for example when the output buffer is full
+ (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
+ with zero avail_out, it must be called again after making room in the
+ output buffer because there might be more output pending.
+
+ If the parameter flush is set to Z_PARTIAL_FLUSH, the current compression
+ block is terminated and flushed to the output buffer so that the
+ decompressor can get all input data available so far. For method 9, a future
+ variant on method 8, the current block will be flushed but not terminated.
+ Z_SYNC_FLUSH has the same effect as partial flush except that the compressed
+ output is byte aligned (the compressor can clear its internal bit buffer)
+ and the current block is always terminated; this can be useful if the
+ compressor has to be restarted from scratch after an interruption (in which
+ case the internal state of the compressor may be lost).
+ If flush is set to Z_FULL_FLUSH, the compression block is terminated, a
+ special marker is output and the compression dictionary is discarded; this
+ is useful to allow the decompressor to synchronize if one compressed block
+ has been damaged (see inflateSync below). Flushing degrades compression and
+ so should be used only when necessary. Using Z_FULL_FLUSH too often can
+ seriously degrade the compression. If deflate returns with avail_out == 0,
+ this function must be called again with the same value of the flush
+ parameter and more output space (updated avail_out), until the flush is
+ complete (deflate returns with non-zero avail_out).
+
+ If the parameter flush is set to Z_PACKET_FLUSH, the compression
+ block is terminated, and a zero-length stored block is output,
+ omitting the length bytes (the effect of this is that the 3-bit type
+ code 000 for a stored block is output, and the output is then
+ byte-aligned). This is designed for use at the end of a PPP packet.
+
+ If the parameter flush is set to Z_FINISH, pending input is processed,
+ pending output is flushed and deflate returns with Z_STREAM_END if there
+ was enough output space; if deflate returns with Z_OK, this function must be
+ called again with Z_FINISH and more output space (updated avail_out) but no
+ more input data, until it returns with Z_STREAM_END or an error. After
+ deflate has returned Z_STREAM_END, the only possible operations on the
+ stream are deflateReset or deflateEnd.
+
+ Z_FINISH can be used immediately after deflateInit if all the compression
+ is to be done in a single step. In this case, avail_out must be at least
+ 0.1% larger than avail_in plus 12 bytes. If deflate does not return
+ Z_STREAM_END, then it must be called again as described above.
+
+ deflate() may update data_type if it can make a good guess about
+ the input data type (Z_ASCII or Z_BINARY). If in doubt, the data is considered
+ binary. This field is only for information purposes and does not affect
+ the compression algorithm in any manner.
+
+ deflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if all input has been
+ consumed and all output has been produced (only when flush is set to
+ Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
+ if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible.
+*/
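+
+/*
+   An informal compression-loop sketch, assuming the stream setup shown
+   earlier; read_buffer and write_buffer are illustrative application helpers
+   (as in the checksum examples near the end of this file), CHUNK is an
+   arbitrary buffer size, and read_buffer returns 0 at end of input. When the
+   inner loop leaves free output space after Z_FINISH, deflate has returned
+   Z_STREAM_END.
+
+     if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK) error();
+     do {
+         strm.next_in = in_buf;
+         strm.avail_in = read_buffer(in_buf, CHUNK);
+         flush = (strm.avail_in == 0) ? Z_FINISH : Z_NO_FLUSH;
+         do {
+             strm.next_out = out_buf;
+             strm.avail_out = CHUNK;
+             ret = deflate(&strm, flush);
+             write_buffer(out_buf, CHUNK - strm.avail_out);
+         } while (strm.avail_out == 0);
+     } while (flush != Z_FINISH);
+     deflateEnd(&strm);
+*/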
+
+
+extern int EXPORT deflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
+ stream state was inconsistent, Z_DATA_ERROR if the stream was freed
+ prematurely (some input or output was discarded). In the error case,
+ msg may be set but then points to a static string (which must not be
+ deallocated).
+*/
+
+
+/*
+extern int EXPORT inflateInit OF((z_streamp strm));
+
+ Initializes the internal stream state for decompression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller. If
+ zalloc and zfree are set to Z_NULL, inflateInit updates them to use default
+ allocation functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_VERSION_ERROR if the zlib library version is incompatible
+ with the version assumed by the caller. msg is set to null if there is no
+ error message. inflateInit does not perform any decompression: this will be
+ done by inflate().
+*/
+
+#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL))
+#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
+#endif
+
+extern int EXPORT inflate OF((z_streamp strm, int flush));
+/*
+ Performs one or both of the following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() provides as much output as possible, until there
+ is no more input data or no more space in the output buffer (see below
+ about the flush parameter).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate(). If inflate returns Z_OK with zero avail_out, it
+ must be called again after making room in the output buffer because there
+ might be more output pending.
+
+ If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
+ inflate flushes as much output as possible to the output buffer. The
+ flushing behavior of inflate is not specified for values of the flush
+ parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
+ current implementation actually flushes as much output as possible
+ anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
+ has been consumed, it is expecting to see the length field of a stored
+ block; if not, it returns Z_DATA_ERROR.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster routine
+ may be used for the single inflate() call.
+
+ inflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if the end of the
+ compressed data has been reached and all uncompressed output has been
+ produced, Z_NEED_DICT if a preset dictionary is needed at this point (see
+ inflateSetDictionary below), Z_DATA_ERROR if the input data was corrupted,
+ Z_STREAM_ERROR if the stream structure was inconsistent (for example if
+ next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory,
+ Z_BUF_ERROR if no progress is possible or if there was not enough room in
+ the output buffer when Z_FINISH is used. In the Z_DATA_ERROR case, the
+ application may then call inflateSync to look for a good compression block.
+ In the Z_NEED_DICT case, strm->adler is set to the Adler32 value of the
+ dictionary chosen by the compressor.
+*/
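+
+/*
+   An informal decompression-loop sketch using Z_PARTIAL_FLUSH as documented
+   above; in_buf, out_buf, CHUNK, read_buffer and write_buffer are
+   illustrative application names, and read_buffer returns 0 when the input
+   is exhausted:
+
+     if (inflateInit(&strm) != Z_OK) error();
+     do {
+         strm.next_in = in_buf;
+         strm.avail_in = read_buffer(in_buf, CHUNK);
+         if (strm.avail_in == 0) break;
+         do {
+             strm.next_out = out_buf;
+             strm.avail_out = CHUNK;
+             ret = inflate(&strm, Z_PARTIAL_FLUSH);
+             if (ret == Z_NEED_DICT || ret == Z_DATA_ERROR || ret == Z_MEM_ERROR)
+                 error();
+             write_buffer(out_buf, CHUNK - strm.avail_out);
+         } while (strm.avail_out == 0);
+     } while (ret != Z_STREAM_END);
+     inflateEnd(&strm);
+*/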
+
+
+extern int EXPORT inflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+ /* Advanced functions */
+
+/*
+ The following functions are needed only in some special applications.
+*/
+
+/*
+extern int EXPORT deflateInit2 OF((z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy));
+
+ This is another version of deflateInit with more compression options. The
+ fields next_in, zalloc, zfree and opaque must be initialized before by
+ the caller.
+
+ The method parameter is the compression method. It must be Z_DEFLATED in
+ this version of the library. (Method 9 will allow a 64K history buffer and
+ partial block flushes.)
+
+ The windowBits parameter is the base two logarithm of the window size
+ (the size of the history buffer). It should be in the range 8..15 for this
+ version of the library (the value 16 will be allowed for method 9). Larger
+ values of this parameter result in better compression at the expense of
+ memory usage. The default value is 15 if deflateInit is used instead.
+
+ The memLevel parameter specifies how much memory should be allocated
+ for the internal compression state. memLevel=1 uses minimum memory but
+ is slow and reduces compression ratio; memLevel=9 uses maximum memory
+ for optimal speed. The default value is 8. See zconf.h for total memory
+ usage as a function of windowBits and memLevel.
+
+ The strategy parameter is used to tune the compression algorithm. Use the
+ value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ string match). Filtered data consists mostly of small values with a
+ somewhat random distribution. In this case, the compression algorithm is
+ tuned to compress them better. The effect of Z_FILTERED is to force more
+ Huffman coding and less string matching; it is somewhat intermediate
+ between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. The strategy parameter only affects
+ the compression ratio but not the correctness of the compressed output even
+ if it is not set appropriately.
+
+ If next_in is not null, the library will use this buffer to hold also
+ some history information; the buffer must either hold the entire input
+ data, or have at least 1<<(windowBits+1) bytes and be writable. If next_in
+ is null, the library will allocate its own history buffer (and leave next_in
+ null). next_out need not be provided here but must be provided by the
+ application for the next call of deflate().
+
+ If the history buffer is provided by the application, next_in must
+ never be changed by the application since the compressor maintains
+ information inside this buffer from call to call; the application
+ must provide more input only by increasing avail_in. next_in is always
+ reset by the library in this case.
+
+ deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
+ not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
+ an invalid method). msg is set to null if there is no error message.
+ deflateInit2 does not perform any compression: this will be done by
+ deflate().
+*/
+
+extern int EXPORT deflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the compression dictionary (history buffer) from the given
+ byte sequence without producing any compressed output. This function must
+ be called immediately after deflateInit or deflateInit2, before any call
+ of deflate. The compressor and decompressor must use exactly the same
+ dictionary (see inflateSetDictionary).
+ The dictionary should consist of strings (byte sequences) that are likely
+ to be encountered later in the data to be compressed, with the most commonly
+ used strings preferably put towards the end of the dictionary. Using a
+ dictionary is most useful when the data to be compressed is short and
+ can be predicted with good accuracy; the data can then be compressed better
+ than with the default empty dictionary. In this version of the library,
+ only the last 32K bytes of the dictionary are used.
+ Upon return of this function, strm->adler is set to the Adler32 value
+ of the dictionary; the decompressor may later use this value to determine
+ which dictionary has been used by the compressor. (The Adler32 value
+ applies to the whole dictionary even if only a subset of the dictionary is
+ actually used by the compressor.)
+
+ deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state
+ is inconsistent (for example if deflate has already been called for this
+ stream). deflateSetDictionary does not perform any compression: this will
+ be done by deflate().
+*/
+
+extern int EXPORT deflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream. If
+ the source stream is using an application-supplied history buffer, a new
+ buffer is allocated for the destination stream. The compressed output
+ buffer is always application-supplied. It's the responsibility of the
+ application to provide the correct values of next_out and avail_out for the
+ next call of deflate.
+
+ This function can be useful when several compression strategies will be
+ tried, for example when there are several ways of pre-processing the input
+ data with a filter. The streams that will be discarded should then be freed
+ by calling deflateEnd. Note that deflateCopy duplicates the internal
+ compression state which can be quite large, so this strategy is slow and
+ can consume lots of memory.
+
+ deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+extern int EXPORT deflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to deflateEnd followed by deflateInit,
+ but does not free and reallocate all the internal compression state.
+ The stream will keep the same compression level and any other attributes
+ that may have been set by deflateInit2.
+
+ deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int EXPORT deflateParams OF((z_streamp strm, int level, int strategy));
+/*
+ Dynamically update the compression level and compression strategy.
+ This can be used to switch between compression and straight copy of
+ the input data, or to switch to a different kind of input data requiring
+ a different strategy. If the compression level is changed, the input
+ available so far is compressed with the old level (and may be flushed);
+ the new level will take effect only at the next call of deflate().
+
+ Before the call of deflateParams, the stream state must be set as for
+ a call of deflate(), since the currently available input may have to
+ be compressed and flushed. In particular, strm->avail_out must be non-zero.
+
+ deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
+ stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
+ if strm->avail_out was zero.
+*/
+
+extern int EXPORT deflateOutputPending OF((z_streamp strm));
+/*
+ Returns the number of bytes of output which are immediately
+ available from the compressor (i.e. without any further input
+ or flush).
+*/
+
+/*
+extern int EXPORT inflateInit2 OF((z_streamp strm,
+ int windowBits));
+
+ This is another version of inflateInit with more compression options. The
+ fields next_out, zalloc, zfree and opaque must be initialized before by
+ the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library (the value 16 will be allowed soon). The
+ default value is 15 if inflateInit is used instead. If a compressed stream
+ with a larger window size is given as input, inflate() will return with
+ the error code Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ If next_out is not null, the library will use this buffer for the history
+ buffer; the buffer must either be large enough to hold the entire output
+ data, or have at least 1<<windowBits bytes. If next_out is null, the
+ library will allocate its own buffer (and leave next_out null). next_in
+ need not be provided here but must be provided by the application for the
+ next call of inflate().
+
+ If the history buffer is provided by the application, next_out must
+ never be changed by the application since the decompressor maintains
+ history information inside this buffer from call to call; the application
+ can only reset next_out to the beginning of the history buffer when
+ avail_out is zero and all output has been consumed.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
+ not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
+ windowBits < 8). msg is set to null if there is no error message.
+ inflateInit2 does not perform any decompression: this will be done by
+ inflate().
+*/
+
+extern int EXPORT inflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the decompression dictionary (history buffer) from the given
+ uncompressed byte sequence. This function must be called immediately after
+ a call of inflate if this call returned Z_NEED_DICT. The dictionary chosen
+ by the compressor can be determined from the Adler32 value returned by this
+ call of inflate. The compressor and decompressor must use exactly the same
+ dictionary (see deflateSetDictionary).
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect Adler32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate().
+*/
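+
+/*
+   An informal sketch of handling Z_NEED_DICT; app_dict, app_dict_len and
+   expected_dictionary_adler are illustrative application values and must
+   correspond to the dictionary given to deflateSetDictionary by the
+   compressor:
+
+     ret = inflate(&strm, Z_PARTIAL_FLUSH);
+     if (ret == Z_NEED_DICT) {
+         if (strm.adler != expected_dictionary_adler) error();
+         if (inflateSetDictionary(&strm, app_dict, app_dict_len) != Z_OK)
+             error();
+         ret = inflate(&strm, Z_PARTIAL_FLUSH);
+     }
+*/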
+
+extern int EXPORT inflateSync OF((z_streamp strm));
+/*
+ Skips invalid compressed data until the special marker (see deflate()
+ above) can be found, or until all available input is skipped. No output
+ is provided.
+
+ inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no marker has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+ case, the application may save the current value of total_in which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
+*/
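+
+/*
+   An informal resynchronization sketch after Z_DATA_ERROR; in_buf, CHUNK,
+   read_buffer and resume_offset are illustrative application names, and more
+   input is supplied until the marker is found or the input runs out:
+
+     if (inflate(&strm, Z_PARTIAL_FLUSH) == Z_DATA_ERROR) {
+         for (;;) {
+             ret = inflateSync(&strm);
+             if (ret == Z_OK) {
+                 resume_offset = strm.total_in;
+                 break;
+             }
+             strm.next_in = in_buf;
+             strm.avail_in = read_buffer(in_buf, CHUNK);
+             if (strm.avail_in == 0) break;
+         }
+     }
+*/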
+
+extern int EXPORT inflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int inflateIncomp OF((z_stream *strm));
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+ /* utility functions */
+
+/*
+ The following utility functions are implemented on top of the
+ basic stream-oriented functions. To simplify the interface, some
+ default options are assumed (compression level, window size,
+ standard memory allocation functions). The source code of these
+ utility functions can easily be modified if you need special options.
+*/
+
+extern int EXPORT compress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Compresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total
+ size of the destination buffer, which must be at least 0.1% larger than
+ sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the
+ compressed buffer.
+ This function can be used to compress a whole file at once if the
+ input file is mmap'ed.
+ compress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer.
+*/
+
+extern int EXPORT uncompress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Decompresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total
+ size of the destination buffer, which must be large enough to hold the
+ entire uncompressed data. (The size of the uncompressed data must have
+ been saved previously by the compressor and transmitted to the decompressor
+ by some mechanism outside the scope of this compression library.)
+ Upon exit, destLen is the actual size of the uncompressed data.
+ This function can be used to decompress a whole file at once if the
+ input file is mmap'ed.
+
+ uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer, or Z_DATA_ERROR if the input data was corrupted.
+*/
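+
+/*
+   An informal one-shot sketch; SRC_LEN and DEST_LEN are illustrative sizes,
+   with DEST_LEN at least 0.1% larger than SRC_LEN plus 12 bytes:
+
+     Bytef src[SRC_LEN], dest[DEST_LEN], back[SRC_LEN];
+     uLongf destLen = DEST_LEN;
+     uLongf backLen = SRC_LEN;
+
+     if (compress(dest, &destLen, src, SRC_LEN) != Z_OK) error();
+     if (uncompress(back, &backLen, dest, destLen) != Z_OK) error();
+
+   On success destLen holds the compressed size and backLen the recovered
+   size (equal to SRC_LEN).
+*/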
+
+
+typedef voidp gzFile;
+
+extern gzFile EXPORT gzopen OF((const char *path, const char *mode));
+/*
+ Opens a gzip (.gz) file for reading or writing. The mode parameter
+ is as in fopen ("rb" or "wb") but can also include a compression level
+ ("wb9"). gzopen can be used to read a file which is not in gzip format;
+ in this case gzread will directly read from the file without decompression.
+ gzopen returns NULL if the file could not be opened or if there was
+ insufficient memory to allocate the (de)compression state; errno
+ can be checked to distinguish the two cases (if errno is zero, the
+ zlib error is Z_MEM_ERROR).
+*/
+
+extern gzFile EXPORT gzdopen OF((int fd, const char *mode));
+/*
+ gzdopen() associates a gzFile with the file descriptor fd. File
+ descriptors are obtained from calls like open, dup, creat, pipe or
+ fileno (if the file has been previously opened with fopen).
+ The mode parameter is as in gzopen.
+ The next call of gzclose on the returned gzFile will also close the
+ file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file
+ descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
+ gzdopen returns NULL if there was insufficient memory to allocate
+ the (de)compression state.
+*/
+
+extern int EXPORT gzread OF((gzFile file, voidp buf, unsigned len));
+/*
+ Reads the given number of uncompressed bytes from the compressed file.
+ If the input file was not in gzip format, gzread copies the given number
+ of bytes into the buffer.
+ gzread returns the number of uncompressed bytes actually read (0 for
+ end of file, -1 for error). */
+
+extern int EXPORT gzwrite OF((gzFile file, const voidp buf, unsigned len));
+/*
+ Writes the given number of uncompressed bytes into the compressed file.
+ gzwrite returns the number of uncompressed bytes actually written
+ (0 in case of error).
+*/
+
+extern int EXPORT gzflush OF((gzFile file, int flush));
+/*
+ Flushes all pending output into the compressed file. The parameter
+ flush is as in the deflate() function. The return value is the zlib
+ error number (see function gzerror below). gzflush returns Z_OK if
+ the flush parameter is Z_FINISH and all output could be flushed.
+ gzflush should be called only when strictly necessary because it can
+ degrade compression.
+*/
+
+extern int EXPORT gzclose OF((gzFile file));
+/*
+ Flushes all pending output if necessary, closes the compressed file
+ and deallocates all the (de)compression state. The return value is the zlib
+ error number (see function gzerror below).
+*/
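+
+/*
+   An informal file round-trip sketch for environments where the gz*
+   functions are compiled in; the path, buf, BUFSIZE and write_buffer are
+   illustrative, and "wb9" requests best compression as described for gzopen:
+
+     gzFile f;
+     int n;
+     char buf[BUFSIZE];
+
+     f = gzopen("example.gz", "wb9");
+     if (f == NULL) error();
+     if (gzwrite(f, buf, sizeof(buf)) == 0) error();
+     if (gzclose(f) != Z_OK) error();
+
+     f = gzopen("example.gz", "rb");
+     if (f == NULL) error();
+     while ((n = gzread(f, buf, sizeof(buf))) > 0)
+         write_buffer(buf, n);
+     if (n < 0) error();
+     gzclose(f);
+*/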
+
+extern const char * EXPORT gzerror OF((gzFile file, int *errnum));
+/*
+ Returns the error message for the last error which occurred on the
+ given compressed file. errnum is set to zlib error number. If an
+ error occurred in the file system and not in the compression library,
+ errnum is set to Z_ERRNO and the application may consult errno
+ to get the exact error code.
+*/
+
+ /* checksum functions */
+
+/*
+ These functions are not related to compression but are exported
+ anyway because they might be useful in applications using the
+ compression library.
+*/
+
+extern uLong EXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
+
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is NULL, this function returns
+ the required initial value for the checksum.
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster. Usage example:
+
+     uLong adler = adler32(0L, Z_NULL, 0);
+
+     while (read_buffer(buffer, length) != EOF) {
+       adler = adler32(adler, buffer, length);
+     }
+     if (adler != original_adler) error();
+*/
+
+extern uLong EXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
+/*
+ Update a running crc with the bytes buf[0..len-1] and return the updated
+ crc. If buf is NULL, this function returns the required initial value
+ for the crc. Pre- and post-conditioning (one's complement) is performed
+ within this function so it shouldn't be done by the application.
+ Usage example:
+
+     uLong crc = crc32(0L, Z_NULL, 0);
+
+     while (read_buffer(buffer, length) != EOF) {
+       crc = crc32(crc, buffer, length);
+     }
+     if (crc != original_crc) error();
+*/
+
+
+ /* various hacks, don't look :) */
+
+/* deflateInit and inflateInit are macros to allow checking the zlib version
+ * and the compiler's view of z_stream:
+ */
+extern int EXPORT deflateInit_ OF((z_streamp strm, int level,
+ const char *version, int stream_size));
+extern int EXPORT inflateInit_ OF((z_streamp strm,
+ const char *version, int stream_size));
+extern int EXPORT deflateInit2_ OF((z_streamp strm, int level, int method,
+ int windowBits, int memLevel, int strategy,
+ const char *version, int stream_size));
+extern int EXPORT inflateInit2_ OF((z_streamp strm, int windowBits,
+ const char *version, int stream_size));
+#define deflateInit(strm, level) \
+ deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
+#define inflateInit(strm) \
+ inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
+#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
+ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, sizeof(z_stream))
+#define inflateInit2(strm, windowBits) \
+ inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
+
+#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+uLongf *get_crc_table OF((void)); /* can be used by asm versions of crc32() */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZLIB_H */
+/* --- zlib.h */