summaryrefslogtreecommitdiffstats
path: root/contrib/libpcap/pcap-bpf.c
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/libpcap/pcap-bpf.c')
-rw-r--r--  contrib/libpcap/pcap-bpf.c  288
1 files changed, 186 insertions, 102 deletions
diff --git a/contrib/libpcap/pcap-bpf.c b/contrib/libpcap/pcap-bpf.c
index 5e3d3ea..8337be8 100644
--- a/contrib/libpcap/pcap-bpf.c
+++ b/contrib/libpcap/pcap-bpf.c
@@ -33,8 +33,8 @@ static const char rcsid[] _U_ =
#ifdef HAVE_ZEROCOPY_BPF
#include <sys/mman.h>
#endif
-#include <sys/time.h>
#include <sys/socket.h>
+#include <time.h>
/*
* <net/bpf.h> defines ioctls, but doesn't include <sys/ioccom.h>.
*
@@ -188,77 +188,67 @@ static int pcap_setfilter_bpf(pcap_t *p, struct bpf_program *fp);
static int pcap_setdirection_bpf(pcap_t *, pcap_direction_t);
static int pcap_set_datalink_bpf(pcap_t *p, int dlt);
-#ifdef HAVE_ZEROCOPY_BPF
/*
- * For zerocopy bpf, we need to override the setnonblock/getnonblock routines
- * so we don't call select(2) if the pcap handle is in non-blocking mode. We
- * preserve the timeout supplied by pcap_open functions to make sure it
- * does not get clobbered if the pcap handle moves between blocking and non-
- * blocking mode.
+ * For zerocopy bpf, the setnonblock/getnonblock routines need to modify
+ * p->md.timeout so we don't call select(2) if the pcap handle is in non-
+ * blocking mode. We preserve the timeout supplied by pcap_open functions
+ * to make sure it does not get clobbered if the pcap handle moves between
+ * blocking and non-blocking mode.
*/
static int
-pcap_getnonblock_zbuf(pcap_t *p, char *errbuf)
+pcap_getnonblock_bpf(pcap_t *p, char *errbuf)
{
- /*
- * Use a negative value for the timeout to represent that the
- * pcap handle is in non-blocking mode.
- */
- return (p->md.timeout < 0);
+#ifdef HAVE_ZEROCOPY_BPF
+ if (p->md.zerocopy) {
+ /*
+ * Use a negative value for the timeout to represent that the
+ * pcap handle is in non-blocking mode.
+ */
+ return (p->md.timeout < 0);
+ }
+#endif
+ return (pcap_getnonblock_fd(p, errbuf));
}
static int
-pcap_setnonblock_zbuf(pcap_t *p, int nonblock, char *errbuf)
+pcap_setnonblock_bpf(pcap_t *p, int nonblock, char *errbuf)
{
- /*
- * Map each value to the corresponding 2's complement, to
- * preserve the timeout value provided with pcap_set_timeout.
- * (from pcap-linux.c).
- */
- if (nonblock) {
- if (p->md.timeout >= 0) {
- /*
- * Timeout is non-negative, so we're not already
- * in non-blocking mode; set it to the 2's
- * complement, to make it negative, as an
- * indication that we're in non-blocking mode.
- */
- p->md.timeout = p->md.timeout * -1 - 1;
- }
- } else {
- if (p->md.timeout < 0) {
- /*
- * Timeout is negative, so we're not already
- * in blocking mode; reverse the previous
- * operation, to make the timeout non-negative
- * again.
- */
- p->md.timeout = (p->md.timeout + 1) * -1;
+#ifdef HAVE_ZEROCOPY_BPF
+ if (p->md.zerocopy) {
+ /*
+ * Map each value to the corresponding 2's complement, to
+ * preserve the timeout value provided with pcap_set_timeout.
+ * (from pcap-linux.c).
+ */
+ if (nonblock) {
+ if (p->md.timeout >= 0) {
+ /*
+ * Timeout is non-negative, so we're not
+ * currently in non-blocking mode; set it
+ * to the 2's complement, to make it
+ * negative, as an indication that we're
+ * in non-blocking mode.
+ */
+ p->md.timeout = p->md.timeout * -1 - 1;
+ }
+ } else {
+ if (p->md.timeout < 0) {
+ /*
+ * Timeout is negative, so we're currently
+ * in blocking mode; reverse the previous
+ * operation, to make the timeout non-negative
+ * again.
+ */
+ p->md.timeout = (p->md.timeout + 1) * -1;
+ }
}
+ return (0);
}
- return (0);
-}
-
-/*
- * Zero-copy specific close method. Un-map the shared buffers then call
- * pcap_cleanup_live_common.
- */
-static void
-pcap_cleanup_zbuf(pcap_t *p)
-{
- /*
- * Delete the mappings. Note that p->buffer gets initialized to one
- * of the mmapped regions in this case, so do not try and free it
- * directly; null it out so that pcap_cleanup_live_common() doesn't
- * try to free it.
- */
- if (p->md.zbuf1 != MAP_FAILED && p->md.zbuf1 != NULL)
- (void) munmap(p->md.zbuf1, p->md.zbufsize);
- if (p->md.zbuf2 != MAP_FAILED && p->md.zbuf2 != NULL)
- (void) munmap(p->md.zbuf2, p->md.zbufsize);
- p->buffer = NULL;
- pcap_cleanup_live_common(p);
+#endif
+ return (pcap_setnonblock_fd(p, nonblock, errbuf));
}
+#ifdef HAVE_ZEROCOPY_BPF
/*
* Zero-copy BPF buffer routines to check for and acknowledge BPF data in
* shared memory buffers.
@@ -412,7 +402,7 @@ pcap_ack_zbuf(pcap_t *p)
p->buffer = NULL;
return (0);
}
-#endif
+#endif /* HAVE_ZEROCOPY_BPF */
pcap_t *
pcap_create(const char *device, char *ebuf)
@@ -437,6 +427,10 @@ pcap_create(const char *device, char *ebuf)
return (p);
}
+/*
+ * On success, returns a file descriptor for a BPF device.
+ * On failure, returns a PCAP_ERROR_ value, and sets p->errbuf.
+ */
static int
bpf_open(pcap_t *p)
{
@@ -497,12 +491,52 @@ bpf_open(pcap_t *p)
* XXX better message for all minors used
*/
if (fd < 0) {
- if (errno == EACCES)
+ switch (errno) {
+
+ case ENOENT:
+ fd = PCAP_ERROR;
+ if (n == 1) {
+ /*
+ * /dev/bpf0 doesn't exist, which
+ * means we probably have no BPF
+ * devices.
+ */
+ snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
+ "(there are no BPF devices)");
+ } else {
+ /*
+ * We got EBUSY on at least one
+ * BPF device, so we have BPF
+ * devices, but all the ones
+ * that exist are busy.
+ */
+ snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
+ "(all BPF devices are busy)");
+ }
+ break;
+
+ case EACCES:
+ /*
+ * Got EACCES on the last device we tried,
+ * and EBUSY on all devices before that,
+ * if any.
+ */
fd = PCAP_ERROR_PERM_DENIED;
- else
+ snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
+ "(cannot open BPF device) %s: %s", device,
+ pcap_strerror(errno));
+ break;
+
+ default:
+ /*
+ * Some other problem.
+ */
fd = PCAP_ERROR;
- snprintf(p->errbuf, PCAP_ERRBUF_SIZE, "(no devices found) %s: %s",
- device, pcap_strerror(errno));
+ snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
+ "(cannot open BPF device) %s: %s", device,
+ pcap_strerror(errno));
+ break;
+ }
}
#endif
@@ -675,14 +709,23 @@ pcap_can_set_rfmon_bpf(pcap_t *p)
*/
fd = bpf_open(p);
if (fd < 0)
- return (fd);
+ return (fd); /* fd is the appropriate error code */
/*
* Now bind to the device.
*/
(void)strncpy(ifr.ifr_name, p->opt.source, sizeof(ifr.ifr_name));
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0) {
- if (errno == ENETDOWN) {
+ switch (errno) {
+
+ case ENXIO:
+ /*
+ * There's no such device.
+ */
+ close(fd);
+ return (PCAP_ERROR_NO_SUCH_DEVICE);
+
+ case ENETDOWN:
/*
* Return a "network down" indication, so that
* the application can report that rather than
@@ -692,7 +735,8 @@ pcap_can_set_rfmon_bpf(pcap_t *p)
*/
close(fd);
return (PCAP_ERROR_IFACE_NOT_UP);
- } else {
+
+ default:
snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
"BIOCSETIF: %s: %s",
p->opt.source, pcap_strerror(errno));
@@ -913,14 +957,28 @@ pcap_read_bpf(pcap_t *p, int cnt, pcap_handler callback, u_char *user)
* processed so far.
*/
if (p->break_loop) {
+ p->bp = bp;
+ p->cc = ep - bp;
+ /*
+ * ep is set based on the return value of read(),
+ * but read() from a BPF device doesn't necessarily
+ * return a value that's a multiple of the alignment
+ * value for BPF_WORDALIGN(). However, whenever we
+ * increment bp, we round up the increment value by
+ * a value rounded up by BPF_WORDALIGN(), so we
+ * could increment bp past ep after processing the
+ * last packet in the buffer.
+ *
+ * We treat ep < bp as an indication that this
+ * happened, and just set p->cc to 0.
+ */
+ if (p->cc < 0)
+ p->cc = 0;
if (n == 0) {
p->break_loop = 0;
return (PCAP_ERROR_BREAK);
- } else {
- p->bp = bp;
- p->cc = ep - bp;
+ } else
return (n);
- }
}
caplen = bhp->bh_caplen;
@@ -972,6 +1030,11 @@ pcap_read_bpf(pcap_t *p, int cnt, pcap_handler callback, u_char *user)
if (++n >= cnt && cnt > 0) {
p->bp = bp;
p->cc = ep - bp;
+ /*
+ * See comment above about p->cc < 0.
+ */
+ if (p->cc < 0)
+ p->cc = 0;
return (n);
}
} else {
@@ -1272,15 +1335,19 @@ pcap_cleanup_bpf(pcap_t *p)
}
#ifdef HAVE_ZEROCOPY_BPF
- /*
- * In zero-copy mode, p->buffer is just a pointer into one of the two
- * memory-mapped buffers, so no need to free it.
- */
if (p->md.zerocopy) {
+ /*
+ * Delete the mappings. Note that p->buffer gets
+ * initialized to one of the mmapped regions in
+ * this case, so do not try and free it directly;
+ * null it out so that pcap_cleanup_live_common()
+ * doesn't try to free it.
+ */
if (p->md.zbuf1 != MAP_FAILED && p->md.zbuf1 != NULL)
- munmap(p->md.zbuf1, p->md.zbufsize);
+ (void) munmap(p->md.zbuf1, p->md.zbufsize);
if (p->md.zbuf2 != MAP_FAILED && p->md.zbuf2 != NULL)
- munmap(p->md.zbuf2, p->md.zbufsize);
+ (void) munmap(p->md.zbuf2, p->md.zbufsize);
p->buffer = NULL;
}
#endif
@@ -1395,7 +1462,15 @@ pcap_activate_bpf(pcap_t *p)
{
int status = 0;
int fd;
+#ifdef LIFNAMSIZ
+ struct lifreq ifr;
+ char *ifrname = ifr.lifr_name;
+ const size_t ifnamsiz = sizeof(ifr.lifr_name);
+#else
struct ifreq ifr;
+ char *ifrname = ifr.ifr_name;
+ const size_t ifnamsiz = sizeof(ifr.ifr_name);
+#endif
struct bpf_version bv;
#ifdef __APPLE__
int sockfd;
@@ -1503,9 +1578,8 @@ pcap_activate_bpf(pcap_t *p)
*/
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if (sockfd != -1) {
- strlcpy(ifr.ifr_name,
- p->opt.source,
- sizeof(ifr.ifr_name));
+ strlcpy(ifrname,
+ p->opt.source, ifnamsiz);
if (ioctl(sockfd, SIOCGIFFLAGS,
(char *)&ifr) < 0) {
/*
@@ -1571,14 +1645,6 @@ pcap_activate_bpf(pcap_t *p)
p->md.zerocopy = 1;
/*
- * Set the cleanup and set/get nonblocking mode ops
- * as appropriate for zero-copy mode.
- */
- p->cleanup_op = pcap_cleanup_zbuf;
- p->setnonblock_op = pcap_setnonblock_zbuf;
- p->getnonblock_op = pcap_getnonblock_zbuf;
-
- /*
* How to pick a buffer size: first, query the maximum buffer
* size supported by zero-copy. This also lets us quickly
* determine whether the kernel generally supports zero-copy.
@@ -1627,7 +1693,7 @@ pcap_activate_bpf(pcap_t *p)
pcap_strerror(errno));
goto bad;
}
- (void)strncpy(ifr.ifr_name, p->opt.source, sizeof(ifr.ifr_name));
+ (void)strncpy(ifrname, p->opt.source, ifnamsiz);
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0) {
snprintf(p->errbuf, PCAP_ERRBUF_SIZE, "BIOCSETIF: %s: %s",
p->opt.source, pcap_strerror(errno));
@@ -1657,9 +1723,13 @@ pcap_activate_bpf(pcap_t *p)
/*
* Now bind to the device.
*/
- (void)strncpy(ifr.ifr_name, p->opt.source,
- sizeof(ifr.ifr_name));
- if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0) {
+ (void)strncpy(ifrname, p->opt.source, ifnamsiz);
+#ifdef BIOCSETLIF
+ if (ioctl(fd, BIOCSETLIF, (caddr_t)&ifr) < 0)
+#else
+ if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0)
+#endif
+ {
status = check_setif_failure(p, errno);
goto bad;
}
@@ -1686,9 +1756,12 @@ pcap_activate_bpf(pcap_t *p)
*/
(void) ioctl(fd, BIOCSBLEN, (caddr_t)&v);
- (void)strncpy(ifr.ifr_name, p->opt.source,
- sizeof(ifr.ifr_name));
+ (void)strncpy(ifrname, p->opt.source, ifnamsiz);
+#ifdef BIOCSETLIF
+ if (ioctl(fd, BIOCSETLIF, (caddr_t)&ifr) >= 0)
+#else
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) >= 0)
+#endif
break; /* that size worked; we're done */
if (errno != ENOBUFS) {
@@ -2180,8 +2253,8 @@ pcap_activate_bpf(pcap_t *p)
p->setfilter_op = pcap_setfilter_bpf;
p->setdirection_op = pcap_setdirection_bpf;
p->set_datalink_op = pcap_set_datalink_bpf;
- p->getnonblock_op = pcap_getnonblock_fd;
- p->setnonblock_op = pcap_setnonblock_fd;
+ p->getnonblock_op = pcap_getnonblock_bpf;
+ p->setnonblock_op = pcap_setnonblock_bpf;
p->stats_op = pcap_stats_bpf;
p->cleanup_op = pcap_cleanup_bpf;
@@ -2234,17 +2307,28 @@ monitor_mode(pcap_t *p, int set)
/*
* Can't get the media types.
*/
- if (errno == EINVAL) {
+ switch (errno) {
+
+ case ENXIO:
+ /*
+ * There's no such device.
+ */
+ close(sock);
+ return (PCAP_ERROR_NO_SUCH_DEVICE);
+
+ case EINVAL:
/*
* Interface doesn't support SIOC{G,S}IFMEDIA.
*/
close(sock);
return (PCAP_ERROR_RFMON_NOTSUP);
+
+ default:
+ snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
+ "SIOCGIFMEDIA 1: %s", pcap_strerror(errno));
+ close(sock);
+ return (PCAP_ERROR);
}
- snprintf(p->errbuf, PCAP_ERRBUF_SIZE, "SIOCGIFMEDIA 1: %s",
- pcap_strerror(errno));
- close(sock);
- return (PCAP_ERROR);
}
if (req.ifm_count == 0) {
/*
OpenPOWER on IntegriCloud