Diffstat (limited to 'sys/netpfil')
-rw-r--r--  sys/netpfil/ipfw/ip_dn_glue.c        |    4
-rw-r--r--  sys/netpfil/ipfw/ip_dn_io.c          |    4
-rw-r--r--  sys/netpfil/ipfw/ip_dummynet.c       |    3
-rw-r--r--  sys/netpfil/ipfw/ip_fw2.c            |  127
-rw-r--r--  sys/netpfil/ipfw/ip_fw_pfil.c        |  145
-rw-r--r--  sys/netpfil/ipfw/ip_fw_private.h     |    6
-rw-r--r--  sys/netpfil/ipfw/ip_fw_sockopt.c     |   12
-rw-r--r--  sys/netpfil/ipfw/ip_fw_table.c       |  121
-rw-r--r--  sys/netpfil/ipfw/ip_fw_table.h       |    7
-rw-r--r--  sys/netpfil/ipfw/ip_fw_table_algo.c  |  927
-rw-r--r--  sys/netpfil/ipfw/nat64/nat64stl.c    |    9
-rw-r--r--  sys/netpfil/pf/if_pflog.c            |    4
-rw-r--r--  sys/netpfil/pf/if_pfsync.c           |   15
-rw-r--r--  sys/netpfil/pf/pf.c                  |  657
-rw-r--r--  sys/netpfil/pf/pf.h                  |    3
-rw-r--r--  sys/netpfil/pf/pf_ioctl.c            |   74
-rw-r--r--  sys/netpfil/pf/pf_norm.c             |   11
-rw-r--r--  sys/netpfil/pf/pf_ruleset.c          |    1
18 files changed, 1918 insertions(+), 212 deletions(-)
diff --git a/sys/netpfil/ipfw/ip_dn_glue.c b/sys/netpfil/ipfw/ip_dn_glue.c
index 4c4659a..7686155 100644
--- a/sys/netpfil/ipfw/ip_dn_glue.c
+++ b/sys/netpfil/ipfw/ip_dn_glue.c
@@ -164,7 +164,7 @@ struct dn_pipe7 { /* a pipe */
SLIST_ENTRY(dn_pipe7) next; /* linked list in a hash slot */
int pipe_nr ; /* number */
- int bandwidth; /* really, bytes/tick. */
+ uint32_t bandwidth; /* really, bytes/tick. */
int delay ; /* really, ticks */
struct mbuf *head, *tail ; /* packets in delay line */
@@ -230,7 +230,7 @@ struct dn_pipe8 { /* a pipe */
SLIST_ENTRY(dn_pipe8) next; /* linked list in a hash slot */
int pipe_nr ; /* number */
- int bandwidth; /* really, bytes/tick. */
+ uint32_t bandwidth; /* really, bytes/tick. */
int delay ; /* really, ticks */
struct mbuf *head, *tail ; /* packets in delay line */
diff --git a/sys/netpfil/ipfw/ip_dn_io.c b/sys/netpfil/ipfw/ip_dn_io.c
index 7295f7b..5d6ecf7 100644
--- a/sys/netpfil/ipfw/ip_dn_io.c
+++ b/sys/netpfil/ipfw/ip_dn_io.c
@@ -601,7 +601,8 @@ serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
struct dn_schk *s = si->sched;
struct mbuf *m = NULL;
int delay_line_idle = (si->dline.mq.head == NULL);
- int done, bw;
+ int done;
+ uint32_t bw;
if (q == NULL) {
q = &def_q;
@@ -759,6 +760,7 @@ dummynet_send(struct mbuf *m)
dst = DIR_DROP;
} else {
dst = pkt->dn_dir;
+ pkt->rule.info |= IPFW_IS_DUMMYNET;
ifp = pkt->ifp;
tag->m_tag_cookie = MTAG_IPFW_RULE;
tag->m_tag_id = 0;
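The added line in dummynet_send() marks re-injected packets with IPFW_IS_DUMMYNET in the rule reference carried by the mbuf tag, so the firewall can tell a dummynet re-injection apart from a fresh packet. A minimal sketch of how a consumer would test that flag (names follow the existing ipfw_rule_ref/MTAG_IPFW_RULE definitions; the snippet itself is illustrative, not part of the patch):

    struct m_tag *mtag;
    struct ipfw_rule_ref *r;

    mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
    if (mtag != NULL) {
            r = (struct ipfw_rule_ref *)(mtag + 1);
            if (r->info & IPFW_IS_DUMMYNET) {
                    /* re-injected by dummynet: resume processing after r->rulenum */
            }
    }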
diff --git a/sys/netpfil/ipfw/ip_dummynet.c b/sys/netpfil/ipfw/ip_dummynet.c
index 7240a99..9fdcc13 100644
--- a/sys/netpfil/ipfw/ip_dummynet.c
+++ b/sys/netpfil/ipfw/ip_dummynet.c
@@ -153,7 +153,7 @@ ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg)
op = "Clamp";
} else
return *v;
- if (op && msg)
+ if (op && msg && bootverbose)
printf("%s %s to %d (was %d)\n", op, msg, *v, oldv);
return *v;
}
@@ -2682,7 +2682,6 @@ static moduledata_t dummynet_mod = {
#define DN_SI_SUB SI_SUB_PROTO_FIREWALL
#define DN_MODEV_ORD (SI_ORDER_ANY - 128) /* after ipfw */
DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD);
-MODULE_DEPEND(dummynet, ipfw, 3, 3, 3);
MODULE_VERSION(dummynet, 3);
/*
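With the clamp message now gated on bootverbose, ipdn_bound_var() silently coerces out-of-range values during normal operation and only reports when the kernel boots verbose. The call pattern is unchanged; a sketch (variable name and bounds below are illustrative only):

    /* Clamp a tunable into [16, 65536], defaulting to 64; report only if bootverbose. */
    ipdn_bound_var(&hash_size, 64, 16, 65536, "sched hash size");

The removed MODULE_DEPEND line drops dummynet's hard module dependency on ipfw; it still initializes after ipfw via the DECLARE_MODULE ordering noted above.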
diff --git a/sys/netpfil/ipfw/ip_fw2.c b/sys/netpfil/ipfw/ip_fw2.c
index 64cb3d3..12762ef 100644
--- a/sys/netpfil/ipfw/ip_fw2.c
+++ b/sys/netpfil/ipfw/ip_fw2.c
@@ -376,7 +376,7 @@ tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd)
static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd, struct ip_fw_chain *chain,
- uint32_t *tablearg)
+ uint32_t *tablearg, void **te)
{
if (ifp == NULL) /* no iface with this packet, match fails */
@@ -386,7 +386,7 @@ iface_match(struct ifnet *ifp, ipfw_insn_if *cmd, struct ip_fw_chain *chain,
if (cmd->name[0] != '\0') { /* match by name */
if (cmd->name[0] == '\1') /* use tablearg to match */
return ipfw_lookup_table(chain, cmd->p.kidx, 0,
- &ifp->if_index, tablearg);
+ &ifp->if_index, tablearg, NULL, te);
/* Check name */
if (cmd->p.glob) {
if (fnmatch(cmd->name, ifp->if_xname, 0) == 0)
@@ -1390,6 +1390,12 @@ ipfw_chk(struct ip_fw_args *args)
struct ip_fw_chain *chain = &V_layer3_chain;
/*
+ * Table match pointers.
+ */
+ void *te = NULL; /* table entry */
+ uint16_t tidx, tkeylen;
+
+ /*
* We store in ulp a pointer to the upper layer protocol header.
* In the ipv4 case this is easy to determine from the header,
* but for ipv6 we might have some additional headers in the middle.
@@ -1746,11 +1752,17 @@ do { \
uint32_t tablearg = 0;
int l, cmdlen, skip_or; /* skip rest of OR block */
struct ip_fw *f;
+ uint8_t *ea;
f = chain->map[f_pos];
if (V_set_disable & (1 << f->set) )
continue;
+ ea = NULL;
+ te = NULL;
+ tidx = 0;
+ tkeylen = 0;
+
skip_or = 0;
for (l = f->cmd_len, cmd = f->cmd ; l > 0 ;
l -= cmdlen, cmd += cmdlen) {
@@ -1819,19 +1831,63 @@ do { \
break;
case O_RECV:
+ {
+ void *ifte = NULL;
+
match = iface_match(m->m_pkthdr.rcvif,
- (ipfw_insn_if *)cmd, chain, &tablearg);
+ (ipfw_insn_if *)cmd, chain, &tablearg,
+ &ifte);
+ if (match && ifte != NULL) {
+ te = ifte;
+ tkeylen = 0;
+ tidx = ((ipfw_insn_if *)cmd)->p.kidx;
+ }
break;
+ }
case O_XMIT:
+ {
+ void *ifte = NULL;
+
match = iface_match(oif, (ipfw_insn_if *)cmd,
- chain, &tablearg);
+ chain, &tablearg, &ifte);
+ if (match && ifte != NULL) {
+ te = ifte;
+ tkeylen = 0;
+ tidx = ((ipfw_insn_if *)cmd)->p.kidx;
+ }
break;
+ }
case O_VIA:
+ {
+ void *ifte = NULL;
+
match = iface_match(oif ? oif :
m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd,
- chain, &tablearg);
+ chain, &tablearg, &ifte);
+ if (match && ifte != NULL) {
+ te = ifte;
+ tkeylen = 0;
+ tidx = ((ipfw_insn_if *)cmd)->p.kidx;
+ }
+ break;
+ }
+
+ case O_MACADDR2_LOOKUP:
+ if (args->eh != NULL) { /* have MAC header */
+ uint32_t v = 0;
+ match = ipfw_lookup_table(chain,
+ cmd->arg1, 0, args->eh, &v, NULL,
+ &te);
+ if (cmdlen == F_INSN_SIZE(ipfw_insn_u32))
+ match = ((ipfw_insn_u32 *)cmd)->d[0] == v;
+ if (match) {
+ tablearg = v;
+ tkeylen = 0;
+ tidx = cmd->arg1;
+ }
+ }
break;
case O_MACADDR2:
@@ -1977,11 +2033,16 @@ do { \
#endif /* !USERSPACE */
else
break;
+ if (args->eh != NULL) /* have MAC header */
+ ea = (uint8_t *)args->eh->ether_dhost;
match = ipfw_lookup_table(chain,
- cmd->arg1, keylen, pkey, &vidx);
+ cmd->arg1, keylen, pkey, &vidx,
+ ea, &te);
if (!match)
break;
tablearg = vidx;
+ tidx = cmd->arg1;
+ tkeylen = keylen;
break;
}
/* cmdlen =< F_INSN_SIZE(ipfw_insn_u32) */
@@ -2007,8 +2068,14 @@ do { \
pkey = &args->f_id.src_ip6;
} else
break;
+ if (args->eh != NULL) { /* have MAC header */
+ if (cmd->opcode == O_IP_DST_LOOKUP)
+ ea = (uint8_t *)args->eh->ether_dhost;
+ else
+ ea = (uint8_t *)args->eh->ether_shost;
+ }
match = ipfw_lookup_table(chain, cmd->arg1,
- keylen, pkey, &vidx);
+ keylen, pkey, &vidx, ea, &te);
if (!match)
break;
if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) {
@@ -2018,6 +2085,8 @@ do { \
break;
}
tablearg = vidx;
+ tidx = cmd->arg1;
+ tkeylen = keylen;
break;
}
@@ -2025,12 +2094,16 @@ do { \
{
uint32_t v = 0;
match = ipfw_lookup_table(chain,
- cmd->arg1, 0, &args->f_id, &v);
+ cmd->arg1, 0, &args->f_id, &v,
+ NULL, &te);
if (cmdlen == F_INSN_SIZE(ipfw_insn_u32))
match = ((ipfw_insn_u32 *)cmd)->d[0] ==
TARG_VAL(chain, v, tag);
- if (match)
+ if (match) {
tablearg = v;
+ tidx = cmd->arg1;
+ tkeylen = 0;
+ }
}
break;
case O_IP_SRC_MASK:
@@ -2685,11 +2758,19 @@ do { \
case O_COUNT:
IPFW_INC_RULE_COUNTER(f, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain, tidx,
+ tkeylen, te, pktlen);
+ }
l = 0; /* exit inner loop */
break;
case O_SKIPTO:
IPFW_INC_RULE_COUNTER(f, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain, tidx,
+ tkeylen, te, pktlen);
+ }
f_pos = JUMP(chain, f, cmd->arg1, tablearg, 0);
/*
* Skip disabled rules, and re-enter
@@ -2766,6 +2847,10 @@ do { \
}
IPFW_INC_RULE_COUNTER(f, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain, tidx,
+ tkeylen, te, pktlen);
+ }
stack = (uint16_t *)(mtag + 1);
/*
@@ -2849,8 +2934,6 @@ do { \
break;
case O_FORWARD_IP:
- if (args->eh) /* not valid on layer2 pkts */
- break;
if (q != f ||
dyn_info.direction == MATCH_FORWARD) {
struct sockaddr_in *sa;
@@ -2910,8 +2993,6 @@ do { \
#ifdef INET6
case O_FORWARD_IP6:
- if (args->eh) /* not valid on layer2 pkts */
- break;
if (q != f ||
dyn_info.direction == MATCH_FORWARD) {
struct sockaddr_in6 *sin6;
@@ -2941,6 +3022,10 @@ do { \
uint32_t fib;
IPFW_INC_RULE_COUNTER(f, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain, tidx,
+ tkeylen, te, pktlen);
+ }
fib = TARG(cmd->arg1, fib) & 0x7FFF;
if (fib >= rt_numfibs)
fib = 0;
@@ -2974,6 +3059,10 @@ do { \
break;
IPFW_INC_RULE_COUNTER(f, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain, tidx,
+ tkeylen, te, pktlen);
+ }
break;
}
@@ -3020,6 +3109,10 @@ do { \
if (is_ipv6) /* IPv6 is not supported yet */
break;
IPFW_INC_RULE_COUNTER(f, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain, tidx,
+ tkeylen, te, pktlen);
+ }
ip_off = ntohs(ip->ip_off);
/* if not fragmented, go to next rule */
@@ -3060,6 +3153,10 @@ do { \
*/
if (retval == 0 && done == 0) {
IPFW_INC_RULE_COUNTER(f, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain,
+ tidx, tkeylen, te, pktlen);
+ }
/*
* Reset the result of the last
* dynamic state lookup.
@@ -3103,6 +3200,10 @@ do { \
struct ip_fw *rule = chain->map[f_pos];
/* Update statistics */
IPFW_INC_RULE_COUNTER(rule, pktlen);
+ if (te != NULL) {
+ ipfw_cnt_update_tentry(chain, tidx, tkeylen, te,
+ pktlen);
+ }
} else {
retval = IP_FW_DENY;
printf("ipfw: ouch!, skip past end of rules, denying packet\n");
diff --git a/sys/netpfil/ipfw/ip_fw_pfil.c b/sys/netpfil/ipfw/ip_fw_pfil.c
index 4316526..f014dfb 100644
--- a/sys/netpfil/ipfw/ip_fw_pfil.c
+++ b/sys/netpfil/ipfw/ip_fw_pfil.c
@@ -113,6 +113,74 @@ SYSEND
#endif /* SYSCTL_NODE */
+static int
+ipfw_check_next_hop(struct ip_fw_args *args, int dir, struct mbuf *m)
+{
+ struct m_tag *fwd_tag;
+ size_t len;
+
+#if (!defined(INET6) && !defined(INET))
+ return (EACCES);
+#else
+
+ KASSERT(args->next_hop == NULL || args->next_hop6 == NULL,
+ ("%s: both next_hop=%p and next_hop6=%p not NULL", __func__,
+ args->next_hop, args->next_hop6));
+#ifdef INET6
+ if (args->next_hop6 != NULL)
+ len = sizeof(struct sockaddr_in6);
+#endif
+#ifdef INET
+ if (args->next_hop != NULL)
+ len = sizeof(struct sockaddr_in);
+#endif
+
+ /* Incoming packets should not be tagged so we do not
+ * m_tag_find. Outgoing packets may be tagged, so we
+ * reuse the tag if present.
+ */
+ fwd_tag = (dir == DIR_IN) ? NULL :
+ m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
+ if (fwd_tag != NULL) {
+ m_tag_unlink(m, fwd_tag);
+ } else {
+ fwd_tag = m_tag_get(PACKET_TAG_IPFORWARD, len,
+ M_NOWAIT);
+ if (fwd_tag == NULL)
+ return (EACCES);
+ }
+#ifdef INET6
+ if (args->next_hop6 != NULL) {
+ struct sockaddr_in6 *sa6;
+
+ sa6 = (struct sockaddr_in6 *)(fwd_tag + 1);
+ bcopy(args->next_hop6, sa6, len);
+ /*
+ * If nh6 address is link-local we should convert
+ * it to kernel internal form before doing any
+ * comparisons.
+ */
+ if (sa6_embedscope(sa6, V_ip6_use_defzone) != 0)
+ return (EACCES);
+ if (in6_localip(&sa6->sin6_addr))
+ m->m_flags |= M_FASTFWD_OURS;
+ m->m_flags |= M_IP6_NEXTHOP;
+ }
+#endif
+#ifdef INET
+ if (args->next_hop != NULL) {
+ bcopy(args->next_hop, (fwd_tag+1), len);
+ if (in_localip(args->next_hop->sin_addr))
+ m->m_flags |= M_FASTFWD_OURS;
+ m->m_flags |= M_IP_NEXTHOP;
+ }
+#endif
+ m_tag_prepend(m, fwd_tag);
+#endif /* INET || INET6 */
+
+ return (IP_FW_PASS);
+}
+
/*
* The pfilter hook to pass packets to ipfw_chk and then to
* dummynet, divert, netgraph or other modules.
@@ -161,72 +229,7 @@ again:
/* next_hop may be set by ipfw_chk */
if (args.next_hop == NULL && args.next_hop6 == NULL)
break; /* pass */
-#if (!defined(INET6) && !defined(INET))
- ret = EACCES;
-#else
- {
- struct m_tag *fwd_tag;
- size_t len;
-
- KASSERT(args.next_hop == NULL || args.next_hop6 == NULL,
- ("%s: both next_hop=%p and next_hop6=%p not NULL", __func__,
- args.next_hop, args.next_hop6));
-#ifdef INET6
- if (args.next_hop6 != NULL)
- len = sizeof(struct sockaddr_in6);
-#endif
-#ifdef INET
- if (args.next_hop != NULL)
- len = sizeof(struct sockaddr_in);
-#endif
-
- /* Incoming packets should not be tagged so we do not
- * m_tag_find. Outgoing packets may be tagged, so we
- * reuse the tag if present.
- */
- fwd_tag = (dir == DIR_IN) ? NULL :
- m_tag_find(*m0, PACKET_TAG_IPFORWARD, NULL);
- if (fwd_tag != NULL) {
- m_tag_unlink(*m0, fwd_tag);
- } else {
- fwd_tag = m_tag_get(PACKET_TAG_IPFORWARD, len,
- M_NOWAIT);
- if (fwd_tag == NULL) {
- ret = EACCES;
- break; /* i.e. drop */
- }
- }
-#ifdef INET6
- if (args.next_hop6 != NULL) {
- struct sockaddr_in6 *sa6;
-
- sa6 = (struct sockaddr_in6 *)(fwd_tag + 1);
- bcopy(args.next_hop6, sa6, len);
- /*
- * If nh6 address is link-local we should convert
- * it to kernel internal form before doing any
- * comparisons.
- */
- if (sa6_embedscope(sa6, V_ip6_use_defzone) != 0) {
- ret = EACCES;
- break;
- }
- if (in6_localip(&sa6->sin6_addr))
- (*m0)->m_flags |= M_FASTFWD_OURS;
- (*m0)->m_flags |= M_IP6_NEXTHOP;
- }
-#endif
-#ifdef INET
- if (args.next_hop != NULL) {
- bcopy(args.next_hop, (fwd_tag+1), len);
- if (in_localip(args.next_hop->sin_addr))
- (*m0)->m_flags |= M_FASTFWD_OURS;
- (*m0)->m_flags |= M_IP_NEXTHOP;
- }
-#endif
- m_tag_prepend(*m0, fwd_tag);
- }
-#endif /* INET || INET6 */
+ ret = ipfw_check_next_hop(&args, dir, *m0);
break;
case IP_FW_DENY:
@@ -368,6 +371,10 @@ ipfw_check_frame(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir,
/* Check result of ipfw_chk() */
switch (i) {
case IP_FW_PASS:
+ /* next_hop may be set by ipfw_chk */
+ if (args.next_hop == NULL && args.next_hop6 == NULL)
+ break; /* pass */
+ ret = ipfw_check_next_hop(&args, dir, *m0);
break;
case IP_FW_DENY:
@@ -505,7 +512,11 @@ ipfw_hook(int onoff, int pf)
hook_func = (pf == AF_LINK) ? ipfw_check_frame : ipfw_check_packet;
- (void) (onoff ? pfil_add_hook : pfil_remove_hook)
+ if (onoff)
+ (void) pfil_add_named_hook
+ (hook_func, NULL, "ipfw", PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh);
+ else
+ (void) pfil_remove_hook
(hook_func, NULL, PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh);
return 0;
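The previously duplicated forwarding-tag block now lives in the ipfw_check_next_hop() helper, which both the layer-3 hook (ipfw_check_packet) and the layer-2 hook (ipfw_check_frame) call only when ipfw_chk() actually set a next hop; together with the O_FORWARD_IP/O_FORWARD_IP6 changes above, this is what makes fwd usable on layer-2 packets. The helper returns IP_FW_PASS on success or EACCES when the tag cannot be set up, matching the old inline behaviour. The call-site pattern at either hook is:

    case IP_FW_PASS:
            /* next_hop may be set by ipfw_chk */
            if (args.next_hop == NULL && args.next_hop6 == NULL)
                    break;          /* plain pass */
            ret = ipfw_check_next_hop(&args, dir, *m0);
            break;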
diff --git a/sys/netpfil/ipfw/ip_fw_private.h b/sys/netpfil/ipfw/ip_fw_private.h
index 6c29505..3762d20 100644
--- a/sys/netpfil/ipfw/ip_fw_private.h
+++ b/sys/netpfil/ipfw/ip_fw_private.h
@@ -761,10 +761,12 @@ int ipfw_run_eaction(struct ip_fw_chain *ch, struct ip_fw_args *args,
struct table_info;
typedef int (table_lookup_t)(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val);
+ uint32_t *val, uint8_t *ea, void **te);
int ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
- void *paddr, uint32_t *val);
+ void *paddr, uint32_t *val, uint8_t *ea, void **te);
+void ipfw_cnt_update_tentry(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
+ void *e, int pktlen);
struct named_object *ipfw_objhash_lookup_table_kidx(struct ip_fw_chain *ch,
uint16_t kidx);
int ipfw_ref_table(struct ip_fw_chain *ch, ipfw_obj_ntlv *ntlv, uint16_t *kidx);
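ipfw_lookup_table() and table_lookup_t gain two optional parameters: ea, an Ethernet address for MAC-aware lookups, and te, an output pointer to the matched table entry that later feeds ipfw_cnt_update_tentry(). Callers that need neither simply pass NULL, as the nat64 call sites below do. A minimal sketch of both calling styles (kidx, dst and pktlen are placeholders, assuming an IPv4 key):

    uint32_t val;
    void *te = NULL;

    /* classic lookup: no MAC key, no entry pointer wanted */
    if (ipfw_lookup_table(chain, kidx, sizeof(in_addr_t), &dst, &val, NULL, NULL))
            /* use val */ ;

    /* counter-aware lookup: keep the entry so the packet can be accounted */
    if (ipfw_lookup_table(chain, kidx, sizeof(in_addr_t), &dst, &val, NULL, &te) &&
        te != NULL)
            ipfw_cnt_update_tentry(chain, kidx, sizeof(in_addr_t), te, pktlen);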
diff --git a/sys/netpfil/ipfw/ip_fw_sockopt.c b/sys/netpfil/ipfw/ip_fw_sockopt.c
index 93df4af..724cb07 100644
--- a/sys/netpfil/ipfw/ip_fw_sockopt.c
+++ b/sys/netpfil/ipfw/ip_fw_sockopt.c
@@ -1880,10 +1880,22 @@ check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
goto bad_size;
ci->object_opcodes++;
break;
+
case O_MACADDR2:
if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
goto bad_size;
break;
+ case O_MACADDR2_LOOKUP:
+ if (cmd->arg1 >= V_fw_tables_max) {
+ printf("ipfw: invalid table number %d\n",
+ cmd->arg1);
+ return (EINVAL);
+ }
+ if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
+ cmdlen != F_INSN_SIZE(ipfw_insn_u32))
+ goto bad_size;
+ ci->object_opcodes++;
+ break;
case O_NOP:
case O_IPID:
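check_ipfw_rule_body() accepts O_MACADDR2_LOOKUP in the same two encodings as the other *_LOOKUP opcodes: a bare ipfw_insn whose arg1 names the table, or an ipfw_insn_u32 that additionally carries a value to compare against the lookup result. A sketch of a valid instruction as the checker expects it (built by hand here for illustration; the userland syntax that generates it is outside this diff):

    ipfw_insn_u32 insn;

    memset(&insn, 0, sizeof(insn));
    insn.o.opcode = O_MACADDR2_LOOKUP;
    insn.o.len = F_INSN_SIZE(ipfw_insn_u32);  /* or F_INSN_SIZE(ipfw_insn) without d[0] */
    insn.o.arg1 = 10;                         /* table index, must be < V_fw_tables_max */
    insn.d[0] = 123;                          /* optional: require table value == 123 */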
diff --git a/sys/netpfil/ipfw/ip_fw_table.c b/sys/netpfil/ipfw/ip_fw_table.c
index 64651c5..de88a10 100644
--- a/sys/netpfil/ipfw/ip_fw_table.c
+++ b/sys/netpfil/ipfw/ip_fw_table.c
@@ -185,6 +185,66 @@ get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
return (&pval[kidx]);
}
+static int
+zero_cnt_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
+ struct sockopt_data *sd)
+{
+ ipfw_obj_tentry *tent;
+ ipfw_obj_header *oh;
+ struct tid_info ti;
+ struct table_config *tc;
+ struct table_algo *ta;
+ struct table_info *kti;
+ struct namedobj_instance *ni;
+ int error;
+ size_t sz;
+
+ /* Check minimum header size */
+ sz = sizeof(*oh) + sizeof(*tent);
+ if (sd->valsize != sz)
+ return (EINVAL);
+
+ oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
+ tent = (ipfw_obj_tentry *)(oh + 1);
+
+ /* Basic length checks for TLVs */
+ if (oh->ntlv.head.length != sizeof(oh->ntlv))
+ return (EINVAL);
+
+ objheader_to_ti(oh, &ti);
+ ti.type = oh->ntlv.type;
+ ti.uidx = tent->idx;
+
+ IPFW_UH_RLOCK(ch);
+ ni = CHAIN_TO_NI(ch);
+
+ /*
+ * Find existing table and check its type.
+ */
+ ta = NULL;
+ if ((tc = find_table(ni, &ti)) == NULL) {
+ IPFW_UH_RUNLOCK(ch);
+ return (ESRCH);
+ }
+
+ /* check table type */
+ if (tc->no.subtype != ti.type) {
+ IPFW_UH_RUNLOCK(ch);
+ return (EINVAL);
+ }
+
+ kti = KIDX_TO_TI(ch, tc->no.kidx);
+ ta = tc->ta;
+
+ if (ta->zero_cnt_tentry == NULL) {
+ IPFW_UH_RUNLOCK(ch);
+ return (ENOTSUP);
+ }
+
+ error = ta->zero_cnt_tentry(tc->astate, kti, tent);
+ IPFW_UH_RUNLOCK(ch);
+
+ return (error);
+}
+
/*
* Checks if we're able to insert/update entry @tei into table
@@ -1029,6 +1089,7 @@ manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
ptei = tei_buf;
ptent = tent;
for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
+ ptei->mac = ptent->mac;
ptei->paddr = &ptent->k;
ptei->subtype = ptent->subtype;
ptei->masklen = ptent->masklen;
@@ -1089,6 +1150,7 @@ find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct table_algo *ta;
struct table_info *kti;
struct table_value *pval;
+ struct timeval boottime;
struct namedobj_instance *ni;
int error;
size_t sz;
@@ -1137,6 +1199,10 @@ find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
if (error == 0) {
pval = get_table_value(ch, tc, tent->v.kidx);
ipfw_export_table_value_v1(pval, &tent->v.value);
+ if (tent->timestamp != 0) {
+ getboottime(&boottime);
+ tent->timestamp += boottime.tv_sec;
+ }
}
IPFW_UH_RUNLOCK(ch);
@@ -1665,13 +1731,37 @@ ipfw_unref_table(struct ip_fw_chain *ch, uint16_t kidx)
*/
int
ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
- void *paddr, uint32_t *val)
+ void *paddr, uint32_t *val, uint8_t *ea, void **te)
{
struct table_info *ti;
ti = KIDX_TO_TI(ch, tbl);
- return (ti->lookup(ti, paddr, plen, val));
+ return (ti->lookup(ti, paddr, plen, val, ea, te));
+}
+
+/*
+ * Update the table entry counter.
+ */
+void
+ipfw_cnt_update_tentry(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
+ void *e, int pktlen)
+{
+ struct namedobj_instance *ni;
+ struct table_algo *ta;
+ struct table_config *tc;
+ struct table_info *ti;
+
+ ni = CHAIN_TO_NI(ch);
+ tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, tbl);
+ if (tc == NULL)
+ return;
+ ta = tc->ta;
+ if (ta->cnt_tentry == NULL)
+ return;
+
+ ti = KIDX_TO_TI(ch, tbl);
+ ta->cnt_tentry(tc->astate, ti, plen, e, pktlen);
}
/*
@@ -2011,6 +2101,7 @@ struct dump_args {
ta_foreach_f *f;
void *farg;
ipfw_obj_tentry tent;
+ time_t boottime;
};
static int
@@ -2169,6 +2260,7 @@ dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct tid_info ti;
struct table_config *tc;
struct table_algo *ta;
+ struct timeval boottime;
struct dump_args da;
uint32_t sz;
@@ -2207,6 +2299,8 @@ dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
da.ti = KIDX_TO_TI(ch, tc->no.kidx);
da.tc = tc;
da.sd = sd;
+ getboottime(&boottime);
+ da.boottime = boottime.tv_sec;
ta = tc->ta;
@@ -2447,6 +2541,9 @@ dump_table_tentry(void *e, void *arg)
pval = get_table_value(da->ch, da->tc, tent->v.kidx);
ipfw_export_table_value_v1(pval, &tent->v.value);
+ if (tent->timestamp != 0)
+ tent->timestamp += da->boottime;
+
return (0);
}
@@ -2806,6 +2903,15 @@ classify_flow(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
return (0);
}
+static int
+classify_mac(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
+{
+ *puidx = cmd->arg1;
+ *ptype = IPFW_TABLE_MAC2;
+
+ return (0);
+}
+
static void
update_arg1(ipfw_insn *cmd, uint16_t idx)
{
@@ -2957,6 +3063,16 @@ static struct opcode_obj_rewrite opcodes[] = {
.manage_sets = table_manage_sets,
},
{
+ .opcode = O_MACADDR2_LOOKUP,
+ .etlv = IPFW_TLV_TBL_NAME,
+ .classifier = classify_mac,
+ .update = update_arg1,
+ .find_byname = table_findbyname,
+ .find_bykidx = table_findbykidx,
+ .create_object = create_table_compat,
+ .manage_sets = table_manage_sets,
+ },
+ {
.opcode = O_XMIT,
.etlv = IPFW_TLV_TBL_NAME,
.classifier = classify_via,
@@ -3290,6 +3406,7 @@ static struct ipfw_sopt_handler scodes[] = {
{ IP_FW_TABLE_XSWAP, 0, HDIR_SET, swap_table },
{ IP_FW_TABLES_ALIST, 0, HDIR_GET, list_table_algo },
{ IP_FW_TABLE_XGETSIZE, 0, HDIR_GET, get_table_size },
+ { IP_FW_TABLE_XZEROCNT, 0, HDIR_SET, zero_cnt_entry },
};
static int
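The new IP_FW_TABLE_XZEROCNT handler (zero_cnt_entry() above) expects exactly one ipfw_obj_header followed by one ipfw_obj_tentry and resets the counters of the single matching entry, returning ESRCH, EINVAL, ENOTSUP or ENOENT on the usual failure modes. A hedged userland sketch of invoking it, assuming ipfw(8)'s existing do_set3() sockopt helper and omitting error handling:

    char buf[sizeof(ipfw_obj_header) + sizeof(ipfw_obj_tentry)];
    ipfw_obj_header *oh = (ipfw_obj_header *)buf;
    ipfw_obj_tentry *tent = (ipfw_obj_tentry *)(oh + 1);

    memset(buf, 0, sizeof(buf));
    /* fill oh->ntlv with the table name and set, as the other table handlers expect */
    tent->head.length = sizeof(*tent);
    tent->subtype = AF_INET;
    tent->masklen = 32;
    tent->k.addr.s_addr = inet_addr("192.0.2.1");

    do_set3(IP_FW_TABLE_XZEROCNT, &oh->opheader, sizeof(buf));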
diff --git a/sys/netpfil/ipfw/ip_fw_table.h b/sys/netpfil/ipfw/ip_fw_table.h
index d657848..cf0309c 100644
--- a/sys/netpfil/ipfw/ip_fw_table.h
+++ b/sys/netpfil/ipfw/ip_fw_table.h
@@ -62,6 +62,7 @@ struct tentry_info {
uint8_t subtype;
uint16_t flags; /* record flags */
uint32_t value; /* value index */
+ uint64_t mac;
};
#define TEI_FLAGS_UPDATE 0x0001 /* Add or update rec if exists */
#define TEI_FLAGS_UPDATED 0x0002 /* Entry has been updated */
@@ -111,6 +112,10 @@ typedef int ta_find_tentry(void *ta_state, struct table_info *ti,
typedef void ta_dump_tinfo(void *ta_state, struct table_info *ti,
ipfw_ta_tinfo *tinfo);
typedef uint32_t ta_get_count(void *ta_state, struct table_info *ti);
+typedef void ta_cnt_tentry(void *ta_state, struct table_info *ti,
+ uint32_t keylen, void *e, int pktlen);
+typedef int ta_zero_cnt_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent);
struct table_algo {
char name[16];
@@ -139,6 +144,8 @@ struct table_algo {
ta_print_config *print_config;
ta_dump_tinfo *dump_tinfo;
ta_get_count *get_count;
+ ta_cnt_tentry *cnt_tentry;
+ ta_zero_cnt_tentry *zero_cnt_tentry;
};
#define TA_FLAG_DEFAULT 0x01 /* Algo is default for given type */
#define TA_FLAG_READONLY 0x02 /* Algo does not support modifications*/
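An algorithm opts into per-entry accounting by filling the two new optional table_algo callbacks; the framework checks them for NULL before use (see ipfw_cnt_update_tentry() and zero_cnt_entry() in ip_fw_table.c). A skeletal counter callback, modeled on the addr:radix version added below (the entry structure is hypothetical):

    static void
    ta_cnt_example_tentry(void *ta_state, struct table_info *ti, uint32_t keylen,
        void *e, int pktlen)
    {
            struct example_entry *ent = e;      /* hypothetical per-entry struct */

            ent->pcnt++;
            ent->bcnt += pktlen;
            ent->timestamp = time_uptime;
    }

Registration happens through the .cnt_tentry and .zero_cnt_tentry fields of struct table_algo, as the addr:radix, addr:hash, iface:array, number:array, flow:hash and mac:hash initializers later in this diff show.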
diff --git a/sys/netpfil/ipfw/ip_fw_table_algo.c b/sys/netpfil/ipfw/ip_fw_table_algo.c
index 1fb8075..67dafac 100644
--- a/sys/netpfil/ipfw/ip_fw_table_algo.c
+++ b/sys/netpfil/ipfw/ip_fw_table_algo.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/queue.h>
+#include <net/ethernet.h>
#include <net/if.h> /* ip_fw.h requires IFNAMSIZ */
#include <net/radix.h>
#include <net/route.h>
@@ -71,7 +72,8 @@ __FBSDID("$FreeBSD$");
* Algo init:
* * struct table_algo has to be filled with:
* name: "type:algoname" format, e.g. "addr:radix". Currently
- * there are the following types: "addr", "iface", "number" and "flow".
+ * there are the following types: "addr", "iface", "mac", "number" and
+ * "flow".
* type: one of IPFW_TABLE_* types
* flags: one or more TA_FLAGS_*
* ta_buf_size: size of structure used to store add/del item state.
@@ -328,6 +330,10 @@ struct radix_addr_entry {
struct radix_node rn[2];
struct sockaddr_in addr;
uint32_t value;
+ uint64_t bcnt;
+ uint64_t mac;
+ uint64_t pcnt;
+ time_t timestamp;
uint8_t masklen;
};
@@ -342,6 +348,10 @@ struct radix_addr_xentry {
struct radix_node rn[2];
struct sa_in6 addr6;
uint32_t value;
+ uint64_t bcnt;
+ uint64_t mac;
+ uint64_t pcnt;
+ time_t timestamp;
uint8_t masklen;
};
@@ -370,7 +380,7 @@ struct ta_buf_radix
};
static int ta_lookup_radix(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val);
+ uint32_t *val, uint8_t *ea, void **te);
static int ta_init_radix(struct ip_fw_chain *ch, void **ta_state,
struct table_info *ti, char *data, uint8_t tflags);
static int flush_radix_entry(struct radix_node *rn, void *arg);
@@ -397,10 +407,14 @@ static void ta_flush_radix_entry(struct ip_fw_chain *ch, struct tentry_info *tei
void *ta_buf);
static int ta_need_modify_radix(void *ta_state, struct table_info *ti,
uint32_t count, uint64_t *pflags);
+static void ta_cnt_radix_tentry(void *ta_state, struct table_info *ti,
+ uint32_t keylen, void *e, int pktlen);
+static int ta_zero_cnt_radix_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent);
static int
ta_lookup_radix(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
struct radix_node_head *rnh;
@@ -412,7 +426,14 @@ ta_lookup_radix(struct table_info *ti, void *key, uint32_t keylen,
rnh = (struct radix_node_head *)ti->state;
ent = (struct radix_addr_entry *)(rnh->rnh_matchaddr(&sa, &rnh->rh));
if (ent != NULL) {
+ if (ent->mac != 0 && ea == NULL)
+ return (0);
+ if (ent->mac != 0 &&
+ memcmp(ea, &ent->mac, ETHER_ADDR_LEN) != 0)
+ return (0);
*val = ent->value;
+ if (te != NULL)
+ *te = (void *)ent;
return (1);
}
} else {
@@ -423,7 +444,14 @@ ta_lookup_radix(struct table_info *ti, void *key, uint32_t keylen,
rnh = (struct radix_node_head *)ti->xstate;
xent = (struct radix_addr_xentry *)(rnh->rnh_matchaddr(&sa6, &rnh->rh));
if (xent != NULL) {
+ if (xent->mac != 0 && ea == NULL)
+ return (0);
+ if (xent->mac != 0 &&
+ memcmp(ea, &xent->mac, ETHER_ADDR_LEN) != 0)
+ return (0);
*val = xent->value;
+ if (te != NULL)
+ *te = (void *)xent;
return (1);
}
}
@@ -523,6 +551,10 @@ ta_dump_radix_tentry(void *ta_state, struct table_info *ti, void *e,
tent->masklen = n->masklen;
tent->subtype = AF_INET;
tent->v.kidx = n->value;
+ tent->mac = n->mac;
+ tent->bcnt = n->bcnt;
+ tent->pcnt = n->pcnt;
+ tent->timestamp = n->timestamp;
#ifdef INET6
} else {
xn = (struct radix_addr_xentry *)e;
@@ -531,6 +563,10 @@ ta_dump_radix_tentry(void *ta_state, struct table_info *ti, void *e,
tent->masklen = xn->masklen;
tent->subtype = AF_INET6;
tent->v.kidx = xn->value;
+ tent->mac = xn->mac;
+ tent->bcnt = xn->bcnt;
+ tent->pcnt = xn->pcnt;
+ tent->timestamp = xn->timestamp;
#endif
}
@@ -718,9 +754,11 @@ ta_add_radix(void *ta_state, struct table_info *ti, struct tentry_info *tei,
/* Save current entry value from @tei */
if (tei->subtype == AF_INET) {
rnh = ti->state;
+ ((struct radix_addr_entry *)tb->ent_ptr)->mac = tei->mac;
((struct radix_addr_entry *)tb->ent_ptr)->value = tei->value;
} else {
rnh = ti->xstate;
+ ((struct radix_addr_xentry *)tb->ent_ptr)->mac = tei->mac;
((struct radix_addr_xentry *)tb->ent_ptr)->value = tei->value;
}
@@ -868,6 +906,63 @@ ta_need_modify_radix(void *ta_state, struct table_info *ti, uint32_t count,
return (0);
}
+static void
+ta_cnt_radix_tentry(void *ta_state, struct table_info *ti, uint32_t keylen,
+ void *e, int pktlen)
+{
+
+ if (keylen == sizeof(in_addr_t)) {
+ struct radix_addr_entry *ent;
+ ent = (struct radix_addr_entry *)e;
+ ent->pcnt++;
+ ent->bcnt += pktlen;
+ ent->timestamp = time_uptime;
+ } else {
+ struct radix_addr_xentry *xent;
+ xent = (struct radix_addr_xentry *)e;
+ xent->pcnt++;
+ xent->bcnt += pktlen;
+ xent->timestamp = time_uptime;
+ }
+}
+
+static int
+ta_zero_cnt_radix_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent)
+{
+ struct radix_node_head *rnh;
+
+ if (tent->subtype == AF_INET) {
+ struct radix_addr_entry *ent;
+ struct sockaddr_in sa;
+ KEY_LEN(sa) = KEY_LEN_INET;
+ sa.sin_addr.s_addr = tent->k.addr.s_addr;
+ rnh = (struct radix_node_head *)ti->state;
+ ent = (struct radix_addr_entry *)rnh->rnh_matchaddr(&sa,
+ &rnh->rh);
+ if (ent == NULL)
+ return (ENOENT);
+ ent->pcnt = 0;
+ ent->bcnt = 0;
+ ent->timestamp = 0;
+ } else {
+ struct radix_addr_xentry *xent;
+ struct sa_in6 sa6;
+ KEY_LEN(sa6) = KEY_LEN_INET6;
+ memcpy(&sa6.sin6_addr, &tent->k.addr6, sizeof(struct in6_addr));
+ rnh = (struct radix_node_head *)ti->xstate;
+ xent = (struct radix_addr_xentry *)rnh->rnh_matchaddr(&sa6,
+ &rnh->rh);
+ if (xent == NULL)
+ return (ENOENT);
+ xent->pcnt = 0;
+ xent->bcnt = 0;
+ xent->timestamp = 0;
+ }
+
+ return (0);
+}
+
struct table_algo addr_radix = {
.name = "addr:radix",
.type = IPFW_TABLE_ADDR,
@@ -885,6 +980,8 @@ struct table_algo addr_radix = {
.find_tentry = ta_find_radix_tentry,
.dump_tinfo = ta_dump_radix_tinfo,
.need_modify = ta_need_modify_radix,
+ .cnt_tentry = ta_cnt_radix_tentry,
+ .zero_cnt_tentry = ta_zero_cnt_radix_tentry,
};
@@ -927,6 +1024,9 @@ struct chashentry {
SLIST_ENTRY(chashentry) next;
uint32_t value;
uint32_t type;
+ uint64_t bcnt;
+ uint64_t pcnt;
+ time_t timestamp;
union {
uint32_t a4; /* Host format */
struct in6_addr a6; /* Network format */
@@ -951,11 +1051,11 @@ static __inline uint32_t hash_ip6_al(struct in6_addr *addr6, void *key, int mask
int hsize);
#endif
static int ta_lookup_chash_slow(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val);
+ uint32_t *val, uint8_t *ea, void **te);
static int ta_lookup_chash_aligned(struct table_info *ti, void *key,
- uint32_t keylen, uint32_t *val);
+ uint32_t keylen, uint32_t *val, uint8_t *ea, void **te);
static int ta_lookup_chash_64(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val);
+ uint32_t *val, uint8_t *ea, void **te);
static int chash_parse_opts(struct chash_cfg *cfg, char *data);
static void ta_print_chash_config(void *ta_state, struct table_info *ti,
char *buf, size_t bufsize);
@@ -992,7 +1092,10 @@ static int ta_fill_mod_chash(void *ta_state, struct table_info *ti, void *ta_buf
static void ta_modify_chash(void *ta_state, struct table_info *ti, void *ta_buf,
uint64_t pflags);
static void ta_flush_mod_chash(void *ta_buf);
-
+static void ta_cnt_chash_tentry(void *ta_state, struct table_info *ti,
+ uint32_t keylen, void *e, int pktlen);
+static int ta_zero_cnt_chash_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent);
#ifdef INET
static __inline uint32_t
@@ -1053,7 +1156,7 @@ hash_ip6_al(struct in6_addr *addr6, void *key, int mask, int hsize)
static int
ta_lookup_chash_slow(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
struct chashbhead *head;
struct chashentry *ent;
@@ -1072,6 +1175,8 @@ ta_lookup_chash_slow(struct table_info *ti, void *key, uint32_t keylen,
SLIST_FOREACH(ent, &head[hash], next) {
if (ent->a.a4 == a) {
*val = ent->value;
+ if (te != NULL)
+ *te = (void *)ent;
return (1);
}
}
@@ -1087,6 +1192,8 @@ ta_lookup_chash_slow(struct table_info *ti, void *key, uint32_t keylen,
SLIST_FOREACH(ent, &head[hash], next) {
if (memcmp(&ent->a.a6, &addr6, 16) == 0) {
*val = ent->value;
+ if (te != NULL)
+ *te = (void *)ent;
return (1);
}
}
@@ -1098,7 +1205,7 @@ ta_lookup_chash_slow(struct table_info *ti, void *key, uint32_t keylen,
static int
ta_lookup_chash_aligned(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
struct chashbhead *head;
struct chashentry *ent;
@@ -1117,6 +1224,8 @@ ta_lookup_chash_aligned(struct table_info *ti, void *key, uint32_t keylen,
SLIST_FOREACH(ent, &head[hash], next) {
if (ent->a.a4 == a) {
*val = ent->value;
+ if (te != NULL)
+ *te = (void *)ent;
return (1);
}
}
@@ -1136,6 +1245,8 @@ ta_lookup_chash_aligned(struct table_info *ti, void *key, uint32_t keylen,
ptmp = (uint64_t *)&ent->a.a6;
if (paddr[0] == ptmp[0] && paddr[1] == ptmp[1]) {
*val = ent->value;
+ if (te != NULL)
+ *te = (void *)ent;
return (1);
}
}
@@ -1147,7 +1258,7 @@ ta_lookup_chash_aligned(struct table_info *ti, void *key, uint32_t keylen,
static int
ta_lookup_chash_64(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
struct chashbhead *head;
struct chashentry *ent;
@@ -1166,6 +1277,8 @@ ta_lookup_chash_64(struct table_info *ti, void *key, uint32_t keylen,
SLIST_FOREACH(ent, &head[hash], next) {
if (ent->a.a4 == a) {
*val = ent->value;
+ if (te != NULL)
+ *te = (void *)ent;
return (1);
}
}
@@ -1183,6 +1296,8 @@ ta_lookup_chash_64(struct table_info *ti, void *key, uint32_t keylen,
paddr = (uint64_t *)&ent->a.a6;
if (a6 == *paddr) {
*val = ent->value;
+ if (te != NULL)
+ *te = (void *)ent;
return (1);
}
}
@@ -1380,12 +1495,18 @@ ta_dump_chash_tentry(void *ta_state, struct table_info *ti, void *e,
tent->masklen = cfg->mask4;
tent->subtype = AF_INET;
tent->v.kidx = ent->value;
+ tent->bcnt = ent->bcnt;
+ tent->pcnt = ent->pcnt;
+ tent->timestamp = ent->timestamp;
#ifdef INET6
} else {
memcpy(&tent->k.addr6, &ent->a.a6, sizeof(struct in6_addr));
tent->masklen = cfg->mask6;
tent->subtype = AF_INET6;
tent->v.kidx = ent->value;
+ tent->bcnt = ent->bcnt;
+ tent->pcnt = ent->pcnt;
+ tent->timestamp = ent->timestamp;
#endif
}
@@ -1861,6 +1982,82 @@ ta_flush_mod_chash(void *ta_buf)
free(mi->main_ptr6, M_IPFW);
}
+static void
+ta_cnt_chash_tentry(void *ta_state, struct table_info *ti, uint32_t keylen,
+ void *e, int pktlen)
+{
+ struct chashentry *ent;
+
+ ent = (struct chashentry *)e;
+ ent->pcnt++;
+ ent->bcnt += pktlen;
+ ent->timestamp = time_uptime;
+}
+
+static int
+ta_zero_cnt_chash_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent)
+{
+ struct chash_cfg *cfg;
+ struct chashbhead *head;
+ struct chashentry ent, *tmp;
+ struct tentry_info tei;
+ int error;
+ uint32_t hash;
+ bool done;
+
+ cfg = (struct chash_cfg *)ta_state;
+
+ done = false;
+ memset(&ent, 0, sizeof(ent));
+ memset(&tei, 0, sizeof(tei));
+
+ if (tent->subtype == AF_INET) {
+ tei.paddr = &tent->k.addr;
+ tei.masklen = cfg->mask4;
+ tei.subtype = AF_INET;
+
+ if ((error = tei_to_chash_ent(&tei, &ent)) != 0)
+ return (error);
+
+ head = cfg->head4;
+ hash = hash_ent(&ent, AF_INET, cfg->mask4, cfg->size4);
+ /* Check for existence */
+ SLIST_FOREACH(tmp, &head[hash], next) {
+ if (tmp->a.a4 != ent.a.a4)
+ continue;
+ done = true;
+ break;
+ }
+ } else {
+ tei.paddr = &tent->k.addr6;
+ tei.masklen = cfg->mask6;
+ tei.subtype = AF_INET6;
+
+ if ((error = tei_to_chash_ent(&tei, &ent)) != 0)
+ return (error);
+
+ head = cfg->head6;
+ hash = hash_ent(&ent, AF_INET6, cfg->mask6, cfg->size6);
+ /* Check for existence */
+ SLIST_FOREACH(tmp, &head[hash], next) {
+ if (memcmp(&tmp->a.a6, &ent.a.a6, 16) != 0)
+ continue;
+ done = true;
+ break;
+ }
+ }
+
+ if (!done)
+ return (ENOENT);
+
+ tmp->pcnt = 0;
+ tmp->bcnt = 0;
+ tmp->timestamp = 0;
+
+ return (0);
+}
+
struct table_algo addr_hash = {
.name = "addr:hash",
.type = IPFW_TABLE_ADDR,
@@ -1882,6 +2079,8 @@ struct table_algo addr_hash = {
.fill_mod = ta_fill_mod_chash,
.modify = ta_modify_chash,
.flush_mod = ta_flush_mod_chash,
+ .cnt_tentry = ta_cnt_chash_tentry,
+ .zero_cnt_tentry = ta_zero_cnt_chash_tentry,
};
@@ -1907,6 +2106,9 @@ struct ifidx {
uint16_t kidx;
uint16_t spare;
uint32_t value;
+ uint64_t bcnt;
+ uint64_t pcnt;
+ time_t timestamp;
};
#define DEFAULT_IFIDX_SIZE 64
@@ -1939,7 +2141,7 @@ struct ta_buf_ifidx
int compare_ifidx(const void *k, const void *v);
static struct ifidx * ifidx_find(struct table_info *ti, void *key);
static int ta_lookup_ifidx(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val);
+ uint32_t *val, uint8_t *ea, void **te);
static int ta_init_ifidx(struct ip_fw_chain *ch, void **ta_state,
struct table_info *ti, char *data, uint8_t tflags);
static void ta_change_ti_ifidx(void *ta_state, struct table_info *ti);
@@ -1975,6 +2177,10 @@ static int foreach_ifidx(struct namedobj_instance *ii, struct named_object *no,
void *arg);
static void ta_foreach_ifidx(void *ta_state, struct table_info *ti,
ta_foreach_f *f, void *arg);
+static void ta_cnt_ifidx_tentry(void *ta_state, struct table_info *ti,
+ uint32_t keylen, void *e, int pktlen);
+static int ta_zero_cnt_ifidx_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent);
int
compare_ifidx(const void *k, const void *v)
@@ -2081,7 +2287,7 @@ ifidx_find(struct table_info *ti, void *key)
static int
ta_lookup_ifidx(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
struct ifidx *ifi;
@@ -2089,6 +2295,8 @@ ta_lookup_ifidx(struct table_info *ti, void *key, uint32_t keylen,
if (ifi != NULL) {
*val = ifi->value;
+ if (te != NULL)
+ *te = ifi;
return (1);
}
@@ -2396,6 +2604,9 @@ if_notifier(struct ip_fw_chain *ch, void *cbdata, uint16_t ifindex)
ifi.kidx = ifindex;
ifi.spare = 0;
ifi.value = ife->value;
+ ifi.bcnt = 0;
+ ifi.pcnt = 0;
+ ifi.timestamp = 0;
res = badd(&ifindex, &ifi, icfg->main_ptr, icfg->used,
sizeof(struct ifidx), compare_ifidx);
KASSERT(res == 1, ("index %d already exists", ifindex));
@@ -2524,6 +2735,7 @@ ta_dump_ifidx_tentry(void *ta_state, struct table_info *ti, void *e,
ipfw_obj_tentry *tent)
{
struct ifentry *ife;
+ struct ifidx *ifi;
ife = (struct ifentry *)e;
@@ -2531,6 +2743,13 @@ ta_dump_ifidx_tentry(void *ta_state, struct table_info *ti, void *e,
memcpy(&tent->k, ife->no.name, IF_NAMESIZE);
tent->v.kidx = ife->value;
+ ifi = ifidx_find(ti, &ife->ic.iface->ifindex);
+ if (ifi != NULL) {
+ tent->bcnt = ifi->bcnt;
+ tent->pcnt = ifi->pcnt;
+ tent->timestamp = ifi->timestamp;
+ }
+
return (0);
}
@@ -2592,6 +2811,47 @@ ta_foreach_ifidx(void *ta_state, struct table_info *ti, ta_foreach_f *f,
ipfw_objhash_foreach(icfg->ii, foreach_ifidx, &wa);
}
+static void
+ta_cnt_ifidx_tentry(void *ta_state, struct table_info *ti, uint32_t keylen,
+ void *e, int pktlen)
+{
+ struct ifidx *ifi;
+
+ ifi = (struct ifidx *)e;
+ ifi->pcnt++;
+ ifi->bcnt += pktlen;
+ ifi->timestamp = time_uptime;
+}
+
+static int
+ta_zero_cnt_ifidx_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent)
+{
+ struct iftable_cfg *icfg;
+ struct ifentry *ife;
+ struct ifidx *ifi;
+ char *ifname;
+
+ icfg = (struct iftable_cfg *)ta_state;
+ ifname = tent->k.iface;
+
+ if (strnlen(ifname, IF_NAMESIZE) == IF_NAMESIZE)
+ return (EINVAL);
+
+ ife = (struct ifentry *)ipfw_objhash_lookup_name(icfg->ii, 0, ifname);
+ if (ife == NULL)
+ return (ENOENT);
+
+ ifi = ifidx_find(ti, &ife->ic.iface->ifindex);
+ if (ifi == NULL)
+ return (ENOENT);
+ ifi->pcnt = 0;
+ ifi->bcnt = 0;
+ ifi->timestamp = 0;
+
+ return (0);
+}
+
struct table_algo iface_idx = {
.name = "iface:array",
.type = IPFW_TABLE_INTERFACE,
@@ -2614,6 +2874,8 @@ struct table_algo iface_idx = {
.modify = ta_modify_ifidx,
.flush_mod = ta_flush_mod_ifidx,
.change_ti = ta_change_ti_ifidx,
+ .cnt_tentry = ta_cnt_ifidx_tentry,
+ .zero_cnt_tentry = ta_zero_cnt_ifidx_tentry,
};
/*
@@ -2631,6 +2893,9 @@ struct table_algo iface_idx = {
struct numarray {
uint32_t number;
uint32_t value;
+ uint64_t bcnt;
+ uint64_t pcnt;
+ time_t timestamp;
};
struct numarray_cfg {
@@ -2647,7 +2912,7 @@ struct ta_buf_numarray
int compare_numarray(const void *k, const void *v);
static struct numarray *numarray_find(struct table_info *ti, void *key);
static int ta_lookup_numarray(struct table_info *ti, void *key,
- uint32_t keylen, uint32_t *val);
+ uint32_t keylen, uint32_t *val, uint8_t *ea, void **te);
static int ta_init_numarray(struct ip_fw_chain *ch, void **ta_state,
struct table_info *ti, char *data, uint8_t tflags);
static void ta_destroy_numarray(void *ta_state, struct table_info *ti);
@@ -2675,6 +2940,10 @@ static int ta_find_numarray_tentry(void *ta_state, struct table_info *ti,
ipfw_obj_tentry *tent);
static void ta_foreach_numarray(void *ta_state, struct table_info *ti,
ta_foreach_f *f, void *arg);
+static void ta_cnt_numarray_tentry(void *ta_state, struct table_info *ti,
+ uint32_t keylen, void *e, int pktlen);
+static int ta_zero_cnt_numarray_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent);
int
compare_numarray(const void *k, const void *v)
@@ -2706,7 +2975,7 @@ numarray_find(struct table_info *ti, void *key)
static int
ta_lookup_numarray(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
struct numarray *ri;
@@ -2714,6 +2983,8 @@ ta_lookup_numarray(struct table_info *ti, void *key, uint32_t keylen,
if (ri != NULL) {
*val = ri->value;
+ if (te != NULL)
+ *te = ri;
return (1);
}
@@ -2987,6 +3258,9 @@ ta_dump_numarray_tentry(void *ta_state, struct table_info *ti, void *e,
tent->k.key = na->number;
tent->v.kidx = na->value;
+ tent->bcnt = na->bcnt;
+ tent->pcnt = na->pcnt;
+ tent->timestamp = na->timestamp;
return (0);
}
@@ -3025,6 +3299,37 @@ ta_foreach_numarray(void *ta_state, struct table_info *ti, ta_foreach_f *f,
f(&array[i], arg);
}
+static void
+ta_cnt_numarray_tentry(void *ta_state, struct table_info *ti, uint32_t keylen,
+ void *e, int pktlen)
+{
+ struct numarray *na;
+
+ na = (struct numarray *)e;
+ na->pcnt++;
+ na->bcnt += pktlen;
+ na->timestamp = time_uptime;
+}
+
+static int
+ta_zero_cnt_numarray_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent)
+{
+ struct numarray_cfg *cfg;
+ struct numarray *na;
+
+ cfg = (struct numarray_cfg *)ta_state;
+
+ na = numarray_find(ti, &tent->k.key);
+ if (na == NULL)
+ return (ENOENT);
+ na->pcnt = 0;
+ na->bcnt = 0;
+ na->timestamp = 0;
+
+ return (0);
+}
+
struct table_algo number_array = {
.name = "number:array",
.type = IPFW_TABLE_NUMBER,
@@ -3045,6 +3350,8 @@ struct table_algo number_array = {
.fill_mod = ta_fill_mod_numarray,
.modify = ta_modify_numarray,
.flush_mod = ta_flush_mod_numarray,
+ .cnt_tentry = ta_cnt_numarray_tentry,
+ .zero_cnt_tentry = ta_zero_cnt_numarray_tentry,
};
/*
@@ -3079,6 +3386,9 @@ struct fhashentry {
uint16_t dport;
uint16_t sport;
uint32_t value;
+ uint64_t bcnt;
+ uint64_t pcnt;
+ time_t timestamp;
uint32_t spare1;
};
@@ -3113,7 +3423,7 @@ static __inline uint32_t hash_flow4(struct fhashentry4 *f, int hsize);
static __inline uint32_t hash_flow6(struct fhashentry6 *f, int hsize);
static uint32_t hash_flow_ent(struct fhashentry *ent, uint32_t size);
static int ta_lookup_fhash(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val);
+ uint32_t *val, uint8_t *ea, void **te);
static int ta_init_fhash(struct ip_fw_chain *ch, void **ta_state,
struct table_info *ti, char *data, uint8_t tflags);
static void ta_destroy_fhash(void *ta_state, struct table_info *ti);
@@ -3144,6 +3454,10 @@ static int ta_fill_mod_fhash(void *ta_state, struct table_info *ti,
static void ta_modify_fhash(void *ta_state, struct table_info *ti, void *ta_buf,
uint64_t pflags);
static void ta_flush_mod_fhash(void *ta_buf);
+static void ta_cnt_fhash_tentry(void *ta_state, struct table_info *ti,
+ uint32_t keylen, void *e, int pktlen);
+static int ta_zero_cnt_fhash_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent);
static __inline int
cmp_flow_ent(struct fhashentry *a, struct fhashentry *b, size_t sz)
@@ -3199,7 +3513,7 @@ hash_flow_ent(struct fhashentry *ent, uint32_t size)
static int
ta_lookup_fhash(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
struct fhashbhead *head;
struct fhashentry *ent;
@@ -3227,6 +3541,8 @@ ta_lookup_fhash(struct table_info *ti, void *key, uint32_t keylen,
SLIST_FOREACH(ent, &head[hash], next) {
if (cmp_flow_ent(ent, &f.e, 2 * 4) != 0) {
*val = ent->value;
+ if (te != NULL)
+ *te = ent;
return (1);
}
}
@@ -3252,6 +3568,8 @@ ta_lookup_fhash(struct table_info *ti, void *key, uint32_t keylen,
SLIST_FOREACH(ent, &head[hash], next) {
if (cmp_flow_ent(ent, &f.e, 2 * 16) != 0) {
*val = ent->value;
+ if (te != NULL)
+ *te = ent;
return (1);
}
}
@@ -3374,6 +3692,9 @@ ta_dump_fhash_tentry(void *ta_state, struct table_info *ti, void *e,
tfe->sport = htons(ent->sport);
tent->v.kidx = ent->value;
tent->subtype = ent->af;
+ tent->bcnt = ent->bcnt;
+ tent->pcnt = ent->pcnt;
+ tent->timestamp = ent->timestamp;
if (ent->af == AF_INET) {
fe4 = (struct fhashentry4 *)ent;
@@ -3751,6 +4072,65 @@ ta_flush_mod_fhash(void *ta_buf)
free(mi->main_ptr, M_IPFW);
}
+static void
+ta_cnt_fhash_tentry(void *ta_state, struct table_info *ti, uint32_t keylen,
+ void *e, int pktlen)
+{
+ struct fhashentry *ent;
+
+ ent = (struct fhashentry *)e;
+ ent->pcnt++;
+ ent->bcnt += pktlen;
+ ent->timestamp = time_uptime;
+}
+
+static int
+ta_zero_cnt_fhash_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent)
+{
+ struct fhash_cfg *cfg;
+ struct fhashbhead *head;
+ struct fhashentry *ent, *tmp;
+ struct fhashentry6 fe6;
+ struct tentry_info tei;
+ int error;
+ uint32_t hash;
+ size_t sz;
+
+ cfg = (struct fhash_cfg *)ta_state;
+
+ ent = &fe6.e;
+
+ memset(&fe6, 0, sizeof(fe6));
+ memset(&tei, 0, sizeof(tei));
+
+ tei.paddr = &tent->k.flow;
+ tei.subtype = tent->subtype;
+
+ if ((error = tei_to_fhash_ent(&tei, ent)) != 0)
+ return (error);
+
+ head = cfg->head;
+ hash = hash_flow_ent(ent, cfg->size);
+
+ if (tei.subtype == AF_INET)
+ sz = 2 * sizeof(struct in_addr);
+ else
+ sz = 2 * sizeof(struct in6_addr);
+
+ /* Check for existence */
+ SLIST_FOREACH(tmp, &head[hash], next) {
+ if (cmp_flow_ent(tmp, ent, sz) != 0) {
+ tmp->pcnt = 0;
+ tmp->bcnt = 0;
+ tmp->timestamp = 0;
+ return (0);
+ }
+ }
+
+ return (ENOENT);
+}
+
struct table_algo flow_hash = {
.name = "flow:hash",
.type = IPFW_TABLE_FLOW,
@@ -3772,6 +4152,8 @@ struct table_algo flow_hash = {
.fill_mod = ta_fill_mod_fhash,
.modify = ta_modify_fhash,
.flush_mod = ta_flush_mod_fhash,
+ .cnt_tentry = ta_cnt_fhash_tentry,
+ .zero_cnt_tentry = ta_zero_cnt_fhash_tentry,
};
/*
@@ -3786,7 +4168,7 @@ struct table_algo flow_hash = {
*/
static int ta_lookup_kfib(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val);
+ uint32_t *val, uint8_t *ea, void **te);
static int kfib_parse_opts(int *pfib, char *data);
static void ta_print_kfib_config(void *ta_state, struct table_info *ti,
char *buf, size_t bufsize);
@@ -3808,7 +4190,7 @@ static void ta_foreach_kfib(void *ta_state, struct table_info *ti,
static int
ta_lookup_kfib(struct table_info *ti, void *key, uint32_t keylen,
- uint32_t *val)
+ uint32_t *val, uint8_t *ea, void **te)
{
#ifdef INET
struct nhop4_basic nh4;
@@ -3837,6 +4219,8 @@ ta_lookup_kfib(struct table_info *ti, void *key, uint32_t keylen,
return (0);
*val = 0;
+ if (te != NULL)
+ *te = NULL;
return (1);
}
@@ -4080,6 +4464,509 @@ struct table_algo addr_kfib = {
.print_config = ta_print_kfib_config,
};
+/*
+ * mac:hash cmds
+ *
+ * ti->data:
+ * [unused][log2hsize]
+ * [ 24][ 8]
+ *
+ */
+
+struct mhashentry;
+
+SLIST_HEAD(mhashbhead, mhashentry);
+
+struct mhash_cfg {
+ struct mhashbhead *head;
+ size_t size;
+ size_t items;
+};
+
+struct macdata {
+ u_char addr[12]; /* dst[6] + src[6] */
+ u_char mask[12]; /* dst[6] + src[6] */
+ uint32_t value;
+ uint64_t bcnt;
+ uint64_t pcnt;
+ time_t timestamp;
+};
+
+struct mhashentry {
+ SLIST_ENTRY(mhashentry) next;
+ struct macdata *mac;
+};
+
+struct ta_buf_mhash {
+ void *ent_ptr;
+ struct macdata mac;
+};
+
+static __inline uint32_t
+hash_mac2(u_char *mac, int hsize)
+{
+ uint32_t i;
+
+ i = ((mac[2] << 16) | (mac[1] << 8) | (mac[0] << 0)) ^
+ ((mac[5] << 16) | (mac[4] << 8) | (mac[3] << 0)) ^
+ ((mac[8] << 16) | (mac[7] << 8) | (mac[6] << 0)) ^
+ ((mac[11] << 16) | (mac[10] << 8) | (mac[9] << 0));
+
+ return (i % (hsize - 1));
+}
+
+static void
+ta_print_mhash_config(void *ta_state, struct table_info *ti, char *buf,
+ size_t bufsize)
+{
+ snprintf(buf, bufsize, "%s", "mac:hash");
+}
+
+static __inline int
+ta_lookup_find_mhash(struct mhashbhead *head, uint32_t hash2,
+ struct macdata *mac, uint32_t *val, uint8_t *ea, void **te)
+{
+ struct mhashentry *ent;
+
+ SLIST_FOREACH(ent, &head[hash2], next) {
+ if (memcmp(&ent->mac->addr, mac->addr, sizeof(mac->addr)) != 0)
+ continue;
+ *val = ent->mac->value;
+ if (te != NULL)
+ *te = (void *)ent;
+ return (1);
+ }
+
+ return (0);
+}
+
+static int
+ta_lookup_mhash(struct table_info *ti, void *key, uint32_t keylen,
+ uint32_t *val, uint8_t *ea, void **te)
+{
+ struct macdata mac;
+ struct mhashbhead *head;
+ uint32_t hash2, hsize;
+
+ /* The any/any catch-all entry always matches. */
+ if (ti->xstate != NULL) {
+ *val = ((struct mhashentry *)ti->xstate)->mac->value;
+ if (te != NULL)
+ *te = ti->xstate;
+ return (1);
+ }
+
+ /*
+ * Look three times for a MAC is still faster than looking at whole
+ * table (128 entries by default).
+ */
+ head = (struct mhashbhead *)ti->state;
+ hsize = 1 << (ti->data & 0xFF);
+ hash2 = hash_mac2(key, hsize);
+ if (ta_lookup_find_mhash(head, hash2,
+ (struct macdata *)key, val, ea, te) == 1)
+ return (1);
+
+ /* src any */
+ memcpy(mac.addr, key, 6);
+ memset(mac.addr + 6, 0, 6);
+ hash2 = hash_mac2(mac.addr, hsize);
+ if (ta_lookup_find_mhash(head, hash2, &mac, val, ea, te) == 1)
+ return (1);
+
+ /* dst any */
+ memset(mac.addr, 0, 6);
+ memcpy(mac.addr + 6, (uint8_t *)key + 6, 6);
+ hash2 = hash_mac2(mac.addr, hsize);
+ if (ta_lookup_find_mhash(head, hash2, &mac, val, ea, te) == 1)
+ return (1);
+
+ return (0);
+}
+
+static int
+ta_init_mhash(struct ip_fw_chain *ch, void **ta_state, struct table_info *ti,
+ char *data, uint8_t tflags)
+{
+ int i;
+ struct mhash_cfg *cfg;
+
+ cfg = malloc(sizeof(struct mhash_cfg), M_IPFW, M_WAITOK | M_ZERO);
+
+ cfg->size = 128;
+ cfg->head = malloc(sizeof(struct mhashbhead) * cfg->size, M_IPFW,
+ M_WAITOK | M_ZERO);
+ for (i = 0; i < cfg->size; i++)
+ SLIST_INIT(&cfg->head[i]);
+ *ta_state = cfg;
+ ti->xstate = NULL;
+ ti->state = cfg->head;
+ ti->data = ta_log2(cfg->size);
+ ti->lookup = ta_lookup_mhash;
+
+ return (0);
+}
+
+static void
+ta_destroy_mhash(void *ta_state, struct table_info *ti)
+{
+ int i;
+ struct mhash_cfg *cfg;
+ struct mhashentry *ent, *ent_next;
+
+ cfg = (struct mhash_cfg *)ta_state;
+
+ for (i = 0; i < cfg->size; i++)
+ SLIST_FOREACH_SAFE(ent, &cfg->head[i], next, ent_next) {
+ free(ent->mac, M_IPFW_TBL);
+ free(ent, M_IPFW_TBL);
+ }
+
+ free(cfg->head, M_IPFW);
+
+ free(cfg, M_IPFW);
+}
+
+static void
+ta_foreach_mhash(void *ta_state, struct table_info *ti, ta_foreach_f *f,
+ void *arg)
+{
+ struct mhash_cfg *cfg;
+ struct mhashentry *ent, *ent_next;
+ int i;
+
+ cfg = (struct mhash_cfg *)ta_state;
+
+ if (ti->xstate != NULL)
+ f(ti->xstate, arg);
+ for (i = 0; i < cfg->size; i++)
+ SLIST_FOREACH_SAFE(ent, &cfg->head[i], next, ent_next)
+ f(ent, arg);
+}
+
+static int
+ta_dump_mhash_tentry(void *ta_state, struct table_info *ti, void *e,
+ ipfw_obj_tentry *tent)
+{
+ struct macdata *mac;
+ struct mhash_cfg *cfg;
+
+ cfg = (struct mhash_cfg *)ta_state;
+ mac = ((struct mhashentry *)e)->mac;
+
+ memcpy(&tent->k.mac, mac->addr, sizeof(mac->addr) + sizeof(mac->mask));
+ tent->masklen = ETHER_ADDR_LEN * 8;
+ tent->subtype = AF_LINK;
+ tent->v.kidx = mac->value;
+ tent->bcnt = mac->bcnt;
+ tent->pcnt = mac->pcnt;
+ tent->timestamp = mac->timestamp;
+
+ return (0);
+}
+
+static int
+ta_find_mhash_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent)
+{
+ struct macdata mac;
+ struct mhash_cfg *cfg;
+ struct mhashentry *ent;
+ struct tentry_info tei;
+ uint32_t hash2;
+ u_char any[12];
+
+ cfg = (struct mhash_cfg *)ta_state;
+
+ memset(&mac, 0, sizeof(mac));
+ memset(&tei, 0, sizeof(tei));
+
+ tei.paddr = &tent->k.mac;
+ tei.subtype = AF_LINK;
+
+ memcpy(mac.addr, tei.paddr, sizeof(mac.addr) + sizeof(mac.mask));
+
+ /* any any */
+ memset(any, 0, sizeof(any));
+ if (memcmp(mac.addr, any, sizeof(mac.addr)) == 0 &&
+ ti->xstate != NULL) {
+ ta_dump_mhash_tentry(ta_state, ti, ti->xstate, tent);
+ return (0);
+ }
+
+ /* Check for existence */
+ hash2 = hash_mac2(mac.addr, cfg->size);
+ SLIST_FOREACH(ent, &cfg->head[hash2], next) {
+ if (memcmp(&ent->mac->addr, &mac.addr, sizeof(mac.addr)) != 0)
+ continue;
+ ta_dump_mhash_tentry(ta_state, ti, ent, tent);
+ return (0);
+ }
+
+ return (ENOENT);
+}
+
+static void
+ta_dump_mhash_tinfo(void *ta_state, struct table_info *ti, ipfw_ta_tinfo *tinfo)
+{
+ struct mhash_cfg *cfg;
+
+ cfg = (struct mhash_cfg *)ta_state;
+
+ tinfo->taclass4 = IPFW_TACLASS_HASH;
+ tinfo->size4 = cfg->size;
+ tinfo->count4 = cfg->items;
+ tinfo->itemsize4 = sizeof(struct mhashentry) + sizeof(struct macdata) -
+ sizeof(void *);
+}
+
+static int
+ta_prepare_add_mhash(struct ip_fw_chain *ch, struct tentry_info *tei,
+ void *ta_buf)
+{
+ struct ta_buf_mhash *tb;
+ struct mhashentry *ent;
+ struct macdata *mac;
+
+ if (tei->subtype != AF_LINK)
+ return (EINVAL);
+
+ tb = (struct ta_buf_mhash *)ta_buf;
+ ent = malloc(sizeof(*ent), M_IPFW_TBL, M_WAITOK | M_ZERO);
+ mac = malloc(sizeof(*mac), M_IPFW_TBL, M_WAITOK | M_ZERO);
+ memcpy(mac->addr, tei->paddr, sizeof(mac->addr) + sizeof(mac->mask));
+
+ ent->mac = mac;
+ tb->ent_ptr = ent;
+
+ return (0);
+}
+
+static int
+ta_add_mhash(void *ta_state, struct table_info *ti, struct tentry_info *tei,
+ void *ta_buf, uint32_t *pnum)
+{
+ int exists;
+ struct macdata *mac;
+ struct mhash_cfg *cfg;
+ struct mhashentry *ent, *tmp;
+ struct ta_buf_mhash *tb;
+ uint32_t hash2, value;
+ u_char any[12];
+
+ cfg = (struct mhash_cfg *)ta_state;
+ tb = (struct ta_buf_mhash *)ta_buf;
+ ent = (struct mhashentry *)tb->ent_ptr;
+ mac = ent->mac;
+ exists = 0;
+
+ /* Read current value from @tei */
+ mac->value = tei->value;
+
+ if (tei->subtype != AF_LINK)
+ return (EINVAL);
+
+ /* any any */
+ memset(any, 0, sizeof(any));
+ if (memcmp(mac->addr, any, sizeof(mac->addr)) == 0) {
+ if (ti->xstate != NULL) {
+ if ((tei->flags & TEI_FLAGS_UPDATE) == 0)
+ return (EEXIST);
+ /* Record already exists. Update value if we're asked to */
+ value = ((struct mhashentry *)ti->xstate)->mac->value;
+ ((struct mhashentry *)ti->xstate)->mac->value = tei->value;
+ tei->value = value;
+ /* Indicate that update has happened instead of addition */
+ tei->flags |= TEI_FLAGS_UPDATED;
+ *pnum = 0;
+ } else {
+ if ((tei->flags & TEI_FLAGS_DONTADD) != 0)
+ return (EFBIG);
+ ti->xstate = ent;
+ tb->ent_ptr = NULL;
+ *pnum = 1;
+
+ /* Update counters */
+ cfg->items++;
+ }
+ return (0);
+ }
+
+ /* Check for existence */
+ hash2 = hash_mac2(mac->addr, cfg->size);
+ SLIST_FOREACH(tmp, &cfg->head[hash2], next) {
+ if (memcmp(&tmp->mac->addr, &mac->addr,
+ sizeof(mac->addr)) == 0) {
+ exists = 1;
+ break;
+ }
+ }
+
+ if (exists == 1) {
+ if ((tei->flags & TEI_FLAGS_UPDATE) == 0)
+ return (EEXIST);
+ /* Record already exists. Update value if we're asked to */
+ value = tmp->mac->value;
+ tmp->mac->value = tei->value;
+ tei->value = value;
+ /* Indicate that update has happened instead of addition */
+ tei->flags |= TEI_FLAGS_UPDATED;
+ *pnum = 0;
+ } else {
+ if ((tei->flags & TEI_FLAGS_DONTADD) != 0)
+ return (EFBIG);
+ SLIST_INSERT_HEAD(&cfg->head[hash2], ent, next);
+ tb->ent_ptr = NULL;
+ *pnum = 1;
+
+ /* Update counters */
+ cfg->items++;
+ }
+
+ return (0);
+}
+
+static int
+ta_prepare_del_mhash(struct ip_fw_chain *ch, struct tentry_info *tei,
+ void *ta_buf)
+{
+ struct ta_buf_mhash *tb;
+
+ tb = (struct ta_buf_mhash *)ta_buf;
+
+ memcpy(tb->mac.addr, tei->paddr, sizeof(tb->mac.addr));
+
+ return (0);
+}
+
+static int
+ta_del_mhash(void *ta_state, struct table_info *ti, struct tentry_info *tei,
+ void *ta_buf, uint32_t *pnum)
+{
+ struct macdata *mac;
+ struct mhash_cfg *cfg;
+ struct mhashentry *tmp, *tmp_next;
+ struct ta_buf_mhash *tb;
+ uint32_t hash2;
+ u_char any[12];
+
+ cfg = (struct mhash_cfg *)ta_state;
+ tb = (struct ta_buf_mhash *)ta_buf;
+ mac = &tb->mac;
+
+ if (tei->masklen != ETHER_ADDR_LEN * 8)
+ return (EINVAL);
+
+ /* any any */
+ memset(any, 0, sizeof(any));
+ if (memcmp(mac->addr, any, sizeof(mac->addr)) == 0 &&
+ ti->xstate != NULL) {
+ cfg->items--;
+ tb->ent_ptr = ti->xstate;
+ tei->value = ((struct mhashentry *)ti->xstate)->mac->value;
+ ti->xstate = NULL;
+ *pnum = 1;
+ return (0);
+ }
+
+ hash2 = hash_mac2(mac->addr, cfg->size);
+ SLIST_FOREACH_SAFE(tmp, &cfg->head[hash2], next, tmp_next) {
+ if (memcmp(&tmp->mac->addr, &mac->addr, sizeof(mac->addr)) != 0)
+ continue;
+
+ SLIST_REMOVE(&cfg->head[hash2], tmp, mhashentry, next);
+ cfg->items--;
+ tb->ent_ptr = tmp;
+ tei->value = tmp->mac->value;
+ *pnum = 1;
+ return (0);
+ }
+
+ return (ENOENT);
+}
+
+static void
+ta_flush_mhash_entry(struct ip_fw_chain *ch, struct tentry_info *tei,
+ void *ta_buf)
+{
+ struct mhashentry *ent;
+ struct ta_buf_mhash *tb;
+
+ tb = (struct ta_buf_mhash *)ta_buf;
+
+ if (tb->ent_ptr != NULL) {
+ ent = (struct mhashentry *)tb->ent_ptr;
+ free(ent->mac, M_IPFW_TBL);
+ free(tb->ent_ptr, M_IPFW_TBL);
+ tb->ent_ptr = NULL;
+ }
+}
+
+static void
+ta_cnt_mhash_tentry(void *ta_state, struct table_info *ti, uint32_t keylen,
+ void *e, int pktlen)
+{
+ struct mhashentry *ent;
+
+ ent = (struct mhashentry *)e;
+ ent->mac->pcnt++;
+ ent->mac->bcnt += pktlen;
+ ent->mac->timestamp = time_uptime;
+}
+
+static int
+ta_zero_cnt_mhash_tentry(void *ta_state, struct table_info *ti,
+ ipfw_obj_tentry *tent)
+{
+ struct macdata mac;
+ struct mhash_cfg *cfg;
+ struct mhashentry *ent;
+ struct tentry_info tei;
+ uint32_t hash2;
+
+ cfg = (struct mhash_cfg *)ta_state;
+
+ memset(&mac, 0, sizeof(mac));
+ memset(&tei, 0, sizeof(tei));
+
+ tei.paddr = &tent->k.mac;
+ tei.subtype = AF_LINK;
+
+ memcpy(mac.addr, tei.paddr, sizeof(mac.addr) + sizeof(mac.mask));
+
+ /* Check for existence */
+ hash2 = hash_mac2(mac.addr, cfg->size);
+ SLIST_FOREACH(ent, &cfg->head[hash2], next) {
+ if (memcmp(&ent->mac->addr, &mac.addr, sizeof(mac.addr)) != 0)
+ continue;
+ ent->mac->pcnt = 0;
+ ent->mac->bcnt = 0;
+ ent->mac->timestamp = 0;
+ return (0);
+ }
+
+ return (ENOENT);
+}
+
+struct table_algo mac_hash = {
+ .name = "mac:hash",
+ .type = IPFW_TABLE_MAC2,
+ .flags = TA_FLAG_DEFAULT,
+ .ta_buf_size = sizeof(struct ta_buf_mhash),
+ .print_config = ta_print_mhash_config,
+ .init = ta_init_mhash,
+ .destroy = ta_destroy_mhash,
+ .prepare_add = ta_prepare_add_mhash,
+ .prepare_del = ta_prepare_del_mhash,
+ .add = ta_add_mhash,
+ .del = ta_del_mhash,
+ .flush_entry = ta_flush_mhash_entry,
+ .foreach = ta_foreach_mhash,
+ .dump_tentry = ta_dump_mhash_tentry,
+ .find_tentry = ta_find_mhash_tentry,
+ .dump_tinfo = ta_dump_mhash_tinfo,
+ .cnt_tentry = ta_cnt_mhash_tentry,
+ .zero_cnt_tentry = ta_zero_cnt_mhash_tentry,
+};
+
void
ipfw_table_algo_init(struct ip_fw_chain *ch)
{
@@ -4092,6 +4979,7 @@ ipfw_table_algo_init(struct ip_fw_chain *ch)
ipfw_add_table_algo(ch, &addr_radix, sz, &addr_radix.idx);
ipfw_add_table_algo(ch, &addr_hash, sz, &addr_hash.idx);
ipfw_add_table_algo(ch, &iface_idx, sz, &iface_idx.idx);
+ ipfw_add_table_algo(ch, &mac_hash, sz, &mac_hash.idx);
ipfw_add_table_algo(ch, &number_array, sz, &number_array.idx);
ipfw_add_table_algo(ch, &flow_hash, sz, &flow_hash.idx);
ipfw_add_table_algo(ch, &addr_kfib, sz, &addr_kfib.idx);
@@ -4104,9 +4992,8 @@ ipfw_table_algo_destroy(struct ip_fw_chain *ch)
ipfw_del_table_algo(ch, addr_radix.idx);
ipfw_del_table_algo(ch, addr_hash.idx);
ipfw_del_table_algo(ch, iface_idx.idx);
+ ipfw_del_table_algo(ch, mac_hash.idx);
ipfw_del_table_algo(ch, number_array.idx);
ipfw_del_table_algo(ch, flow_hash.idx);
ipfw_del_table_algo(ch, addr_kfib.idx);
}
-
-
diff --git a/sys/netpfil/ipfw/nat64/nat64stl.c b/sys/netpfil/ipfw/nat64/nat64stl.c
index 87e64ec..bd4b9aa 100644
--- a/sys/netpfil/ipfw/nat64/nat64stl.c
+++ b/sys/netpfil/ipfw/nat64/nat64stl.c
@@ -185,8 +185,8 @@ nat64stl_handle_icmp6(struct ip_fw_chain *chain, struct nat64stl_cfg *cfg,
* IPv4 mapped address.
*/
ip6i = mtodo(m, hlen);
- if (ipfw_lookup_table(chain, cfg->map64,
- sizeof(struct in6_addr), &ip6i->ip6_dst, &tablearg) == 0) {
+ if (ipfw_lookup_table(chain, cfg->map64, sizeof(struct in6_addr),
+ &ip6i->ip6_dst, &tablearg, NULL, NULL) == 0) {
m_freem(m);
return (NAT64RETURN);
}
@@ -223,11 +223,12 @@ ipfw_nat64stl(struct ip_fw_chain *chain, struct ip_fw_args *args,
case 4:
dst4 = htonl(args->f_id.dst_ip);
ret = ipfw_lookup_table(chain, cfg->map46, sizeof(in_addr_t),
- &dst4, &tablearg);
+ &dst4, &tablearg, NULL, NULL);
break;
case 6:
ret = ipfw_lookup_table(chain, cfg->map64,
- sizeof(struct in6_addr), &args->f_id.src_ip6, &tablearg);
+ sizeof(struct in6_addr), &args->f_id.src_ip6,
+ &tablearg, NULL, NULL);
break;
default:
return (0);
diff --git a/sys/netpfil/pf/if_pflog.c b/sys/netpfil/pf/if_pflog.c
index cbf596b..3e5d7c4 100644
--- a/sys/netpfil/pf/if_pflog.c
+++ b/sys/netpfil/pf/if_pflog.c
@@ -222,13 +222,16 @@ pflog_packet(struct pfi_kif *kif, struct mbuf *m, sa_family_t af, u_int8_t dir,
if (am == NULL) {
hdr.rulenr = htonl(rm->nr);
hdr.subrulenr = -1;
+ hdr.ridentifier = rm->cuid;
} else {
hdr.rulenr = htonl(am->nr);
hdr.subrulenr = htonl(rm->nr);
+ hdr.ridentifier = rm->cuid;
if (ruleset != NULL && ruleset->anchor != NULL)
strlcpy(hdr.ruleset, ruleset->anchor->name,
sizeof(hdr.ruleset));
}
+#ifdef PF_USER_INFO
/*
* XXXGL: we avoid pf_socket_lookup() when we are holding
* state lock, since this leads to unsafe LOR.
@@ -243,6 +246,7 @@ pflog_packet(struct pfi_kif *kif, struct mbuf *m, sa_family_t af, u_int8_t dir,
hdr.pid = NO_PID;
hdr.rule_uid = rm->cuid;
hdr.rule_pid = rm->cpid;
+#endif
hdr.dir = dir;
#ifdef INET
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index 00d6194..1d95074 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -188,9 +188,6 @@ struct pfsync_softc {
struct ip_moptions sc_imo;
struct in_addr sc_sync_peer;
uint32_t sc_flags;
-#define PFSYNCF_OK 0x00000001
-#define PFSYNCF_DEFER 0x00000002
-#define PFSYNCF_PUSH 0x00000004
uint8_t sc_maxupdates;
struct ip sc_template;
struct callout sc_tmo;
@@ -368,7 +365,7 @@ pfsync_clone_destroy(struct ifnet *ifp)
callout_drain(&sc->sc_bulkfail_tmo);
callout_drain(&sc->sc_bulk_tmo);
- if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
+ if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p && V_pfsync_carp_adj > 0)
(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
bpfdetach(ifp);
if_detach(ifp);
@@ -1156,7 +1153,7 @@ pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
sc->sc_ureq_sent = 0;
sc->sc_bulk_tries = 0;
callout_stop(&sc->sc_bulkfail_tmo);
- if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
+ if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p && V_pfsync_carp_adj > 0)
(*carp_demote_adj_p)(-V_pfsync_carp_adj,
"pfsync bulk done");
sc->sc_flags |= PFSYNCF_OK;
@@ -1314,8 +1311,7 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
}
pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
- pfsyncr.pfsyncr_defer = (PFSYNCF_DEFER ==
- (sc->sc_flags & PFSYNCF_DEFER));
+ pfsyncr.pfsyncr_defer = sc->sc_flags;
PFSYNC_UNLOCK(sc);
return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
sizeof(pfsyncr)));
@@ -1409,7 +1405,7 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
/* Request a full state table update. */
- if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
+ if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p && V_pfsync_carp_adj > 0)
(*carp_demote_adj_p)(V_pfsync_carp_adj,
"pfsync bulk start");
sc->sc_flags &= ~PFSYNCF_OK;
@@ -1639,6 +1635,7 @@ pfsync_sendout(int schedswi)
if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
sc->sc_len = PFSYNC_MINPKT;
+ /* XXX: Should not voluntarily drop update packets! */
if (!_IF_QFULL(&sc->sc_ifp->if_snd))
_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
else {
@@ -2164,7 +2161,7 @@ pfsync_bulk_fail(void *arg)
sc->sc_ureq_sent = 0;
sc->sc_bulk_tries = 0;
PFSYNC_LOCK(sc);
- if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
+ if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p && V_pfsync_carp_adj > 0)
(*carp_demote_adj_p)(-V_pfsync_carp_adj,
"pfsync bulk fail");
sc->sc_flags |= PFSYNCF_OK;
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index 096b9a4..3733fa9 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -90,6 +90,8 @@ __FBSDID("$FreeBSD$");
#include <netinet/udp_var.h>
#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
+#include <netinet/ip_fw.h>
+#include <netinet/ip_dummynet.h>
#ifdef INET6
#include <netinet/ip6.h>
@@ -232,6 +234,8 @@ static int pf_state_key_attach(struct pf_state_key *,
static void pf_state_key_detach(struct pf_state *, int);
static int pf_state_key_ctor(void *, int, void *, int);
static u_int32_t pf_tcp_iss(struct pf_pdesc *);
+void pf_rule_to_actions(struct pf_rule *,
+ struct pf_rule_actions *);
static int pf_test_rule(struct pf_rule **, struct pf_state **,
int, struct pfi_kif *, struct mbuf *, int,
struct pf_pdesc *, struct pf_rule **,
@@ -264,7 +268,8 @@ static int pf_test_state_icmp(struct pf_state **, int,
struct pfi_kif *, struct mbuf *, int,
void *, struct pf_pdesc *, u_short *);
static int pf_test_state_other(struct pf_state **, int,
- struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
+ struct pfi_kif *, struct mbuf *, int,
+ struct pf_pdesc *);
static u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
sa_family_t);
static u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
@@ -287,6 +292,8 @@ static u_int pf_purge_expired_states(u_int, int);
static void pf_purge_unlinked_rules(void);
static int pf_mtag_uminit(void *, int, int);
static void pf_mtag_free(struct m_tag *);
+static void pf_packet_rework_nat(struct mbuf *, struct pf_pdesc *,
+ int, struct pf_state_key *);
#ifdef INET
static void pf_route(struct mbuf **, struct pf_rule *, int,
struct ifnet *, struct pf_state *,
@@ -303,31 +310,53 @@ static void pf_route6(struct mbuf **, struct pf_rule *, int,
int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
extern int pf_end_threads;
+extern struct proc *pf_purge_proc;
VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
-#define PACKET_LOOPED(pd) ((pd)->pf_mtag && \
- (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
+#define PACKET_UNDO_NAT(_m, _pd, _off, _s, _dir) \
+ do { \
+ struct pf_state_key *nk; \
+ if ((_dir) == PF_OUT) \
+ nk = (_s)->key[PF_SK_STACK]; \
+ else \
+ nk = (_s)->key[PF_SK_WIRE]; \
+ pf_packet_rework_nat(_m, _pd, _off, nk); \
+ } while (0)
+#define PACKET_REDO_NAT(_m, _pd, _off, _s, _dir) \
+ do { \
+ struct pf_state_key *nk; \
+ if ((_dir) == PF_OUT) \
+ nk = (_s)->key[PF_SK_WIRE]; \
+ else \
+ nk = (_s)->key[PF_SK_STACK]; \
+ pf_packet_rework_nat(_m, _pd, _off, nk); \
+ } while (0)
+
+
+#define PACKET_LOOPED(pd) (((pd)->pf_mtag && \
+ (pd)->pf_mtag->flags & PF_PACKET_LOOPED) ? 1 : 0)
#define STATE_LOOKUP(i, k, d, s, pd) \
do { \
(s) = pf_find_state((i), (k), (d)); \
if ((s) == NULL) \
return (PF_DROP); \
- if (PACKET_LOOPED(pd)) \
+ if (PACKET_LOOPED(pd)) { \
+ if ((s)->key[PF_SK_WIRE] != (s)->key[PF_SK_STACK]) { \
+ PACKET_REDO_NAT(m, pd, off, s, direction); \
+ } \
return (PF_PASS); \
+ } \
if ((d) == PF_OUT && \
(((s)->rule.ptr->rt == PF_ROUTETO && \
- (s)->rule.ptr->direction == PF_OUT) || \
- ((s)->rule.ptr->rt == PF_REPLYTO && \
- (s)->rule.ptr->direction == PF_IN)) && \
+ (s)->rule.ptr->direction == PF_OUT)) && \
(s)->rt_kif != NULL && \
(s)->rt_kif != (i)) \
return (PF_PASS); \
} while (0)
-#define BOUND_IFACE(r, k) \
- ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
+#define BOUND_IFACE(r, k) k
#define STATE_INC_COUNTERS(s) \
do { \
@@ -413,6 +442,72 @@ pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
return (0);
}
+static void
+pf_packet_rework_nat(struct mbuf *m, struct pf_pdesc *pd, int off,
+ struct pf_state_key *nk)
+{
+
+ switch (pd->proto) {
+ case IPPROTO_TCP: {
+ struct tcphdr *th = pd->hdr.tcp;
+
+ if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
+ pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum,
+ &th->th_sum, &nk->addr[pd->sidx],
+ nk->port[pd->sidx], 0, pd->af);
+ if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
+ pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum,
+ &th->th_sum, &nk->addr[pd->didx],
+ nk->port[pd->didx], 0, pd->af);
+ m_copyback(m, off, sizeof(*th), (caddr_t)th);
+ }
+ break;
+ case IPPROTO_UDP: {
+ struct udphdr *uh = pd->hdr.udp;
+
+ if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
+ pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
+ &uh->uh_sum, &nk->addr[pd->sidx],
+ nk->port[pd->sidx], 1, pd->af);
+ if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
+ pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
+ &uh->uh_sum, &nk->addr[pd->didx],
+ nk->port[pd->didx], 1, pd->af);
+ m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
+ }
+ break;
+ /* case IPPROTO_ICMP: */
+ /* XXX: Doing this for ICMP is probably wrong! */
+ /* break; */
+ default:
+ if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
+ switch (pd->af) {
+ case AF_INET:
+ pf_change_a(&pd->src->v4.s_addr,
+ pd->ip_sum, nk->addr[pd->sidx].v4.s_addr,
+ 0);
+ break;
+ case AF_INET6:
+ PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
+ break;
+ }
+ }
+ if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
+ switch (pd->af) {
+ case AF_INET:
+ pf_change_a(&pd->dst->v4.s_addr,
+ pd->ip_sum, nk->addr[pd->didx].v4.s_addr,
+ 0);
+ break;
+ case AF_INET6:
+ PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
+ break;
+ }
+ }
+ break;
+ }
+}
+
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
@@ -1319,7 +1414,8 @@ pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
/* List is sorted, if-bound states before floating ones. */
TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
- if (s->kif == V_pfi_all || s->kif == kif) {
+ /* if (s->kif == V_pfi_all || s->kif == kif) { */
+ {
PF_STATE_LOCK(s);
PF_HASHROW_UNLOCK(kh);
if (s->timeout >= PFTM_MAX) {
@@ -1454,48 +1550,44 @@ pf_purge_thread(void *unused __unused)
VNET_ITERATOR_DECL(vnet_iter);
u_int idx = 0;
- for (;;) {
- PF_RULES_RLOCK();
- rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);
- PF_RULES_RUNLOCK();
+ sx_xlock(&pf_end_lock);
+ while (pf_end_threads == 0) {
+ sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", hz / 10);
VNET_LIST_RLOCK();
VNET_FOREACH(vnet_iter) {
CURVNET_SET(vnet_iter);
- if (pf_end_threads) {
- pf_end_threads++;
- wakeup(pf_purge_thread);
- kproc_exit(0);
- }
-
- /* Wait while V_pf_default_rule.timeout is initialized. */
- if (V_pf_vnet_active == 0) {
- CURVNET_RESTORE();
- continue;
- }
+ /* Wait while V_pf_default_rule.timeout is initialized. */
+ if (V_pf_vnet_active == 0) {
+ CURVNET_RESTORE();
+ continue;
+ }
- /* Process 1/interval fraction of the state table every run. */
- idx = pf_purge_expired_states(idx, pf_hashmask /
+ /* Process 1/interval fraction of the state table every run. */
+ idx = pf_purge_expired_states(idx, pf_hashmask /
(V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
- /* Purge other expired types every PFTM_INTERVAL seconds. */
- if (idx == 0) {
- /*
- * Order is important:
- * - states and src nodes reference rules
- * - states and rules reference kifs
- */
- pf_purge_expired_fragments();
- pf_purge_expired_src_nodes();
- pf_purge_unlinked_rules();
- pfi_kif_purge();
- }
- CURVNET_RESTORE();
+ /* Purge other expired types every PFTM_INTERVAL seconds. */
+ if (idx == 0) {
+ /*
+ * Order is important:
+ * - states and src nodes reference rules
+ * - states and rules reference kifs
+ */
+ pf_purge_expired_fragments();
+ pf_purge_expired_src_nodes();
+ pf_purge_unlinked_rules();
+ pfi_kif_purge();
+ }
+ CURVNET_RESTORE();
}
VNET_LIST_RUNLOCK();
}
- /* not reached */
+
+ pf_end_threads++;
+ sx_xunlock(&pf_end_lock);
+ kproc_exit(0);
}
void
@@ -2697,6 +2789,7 @@ pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
return (pf_match(op, a1, a2, p));
}
+#ifdef PF_USER_INFO
static int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
@@ -2712,6 +2805,7 @@ pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
return (0);
return (pf_match(op, a1, a2, g));
}
+#endif
int
pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
@@ -2905,6 +2999,22 @@ pf_addr_inc(struct pf_addr *addr, sa_family_t af)
}
#endif /* INET6 */
+void
+pf_rule_to_actions(struct pf_rule *r, struct pf_rule_actions *a)
+{
+ if (r->qid)
+ a->qid = r->qid;
+ if (r->pqid)
+ a->pqid = r->pqid;
+ if (r->pdnpipe)
+ a->pdnpipe = r->pdnpipe;
+ if (r->dnpipe)
+ a->dnpipe = r->dnpipe;
+ if (r->free_flags & PFRULE_DN_IS_PIPE)
+ a->flags |= PFRULE_DN_IS_PIPE;
+}
+
+#ifdef PF_USER_INFO
int
pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
{
@@ -2984,6 +3094,7 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
return (1);
}
+#endif
static u_int8_t
pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
@@ -3161,12 +3272,14 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
PF_RULES_RASSERT();
+#ifdef PF_USER_INFO
if (inp != NULL) {
INP_LOCK_ASSERT(inp);
pd->lookup.uid = inp->inp_cred->cr_uid;
pd->lookup.gid = inp->inp_cred->cr_groups[0];
pd->lookup.done = 1;
}
+#endif
switch (pd->proto) {
case IPPROTO_TCP:
@@ -3377,7 +3490,11 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
/* icmp only. type always 0 in other cases */
else if (r->code && r->code != icmpcode + 1)
r = TAILQ_NEXT(r, entries);
- else if (r->tos && !(r->tos == pd->tos))
+ else if ((r->rule_flag & PFRULE_TOS) && r->tos &&
+ !(r->tos == pd->tos))
+ r = TAILQ_NEXT(r, entries);
+ else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
+ !(r->tos == (pd->tos & DSCP_MASK)))
r = TAILQ_NEXT(r, entries);
else if (r->rule_flag & PFRULE_FRAGMENT)
r = TAILQ_NEXT(r, entries);
@@ -3385,6 +3502,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
(r->flagset & th->th_flags) != r->flags)
r = TAILQ_NEXT(r, entries);
/* tcp/udp only. uid.op always 0 in other cases */
+#ifdef PF_USER_INFO
else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
pf_socket_lookup(direction, pd, m), 1)) &&
!pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
@@ -3396,6 +3514,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
!pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
pd->lookup.gid))
r = TAILQ_NEXT(r, entries);
+#endif
else if (r->prio &&
!pf_match_ieee8021q_pcp(r->prio, m))
r = TAILQ_NEXT(r, entries);
@@ -3416,10 +3535,20 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
if (r->rtableid >= 0)
rtableid = r->rtableid;
if (r->anchor == NULL) {
- match = 1;
- *rm = r;
- *am = a;
- *rsm = ruleset;
+ if (r->action == PF_MATCH) {
+ r->packets[direction == PF_OUT]++;
+ r->bytes[direction == PF_OUT] += pd->tot_len;
+ pf_rule_to_actions(r, &pd->act);
+ if (r->log)
+ PFLOG_PACKET(kif, m, af,
+ direction, PFRES_MATCH, r,
+ a, ruleset, pd, 1);
+ } else {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ }
if ((*rm)->quick)
break;
r = TAILQ_NEXT(r, entries);
@@ -3438,6 +3567,9 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
REASON_SET(&reason, PFRES_MATCH);
+ /* apply actions for last matching pass/block rule */
+ pf_rule_to_actions(r, &pd->act);
+
if (r->log || (nr != NULL && nr->log)) {
if (rewrite)
m_copyback(m, off, hdrlen, pd->hdr.any);
@@ -3611,6 +3743,11 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
s->state_flags |= PFSTATE_SLOPPY;
s->log = r->log & PF_LOG_ALL;
s->sync_state = PFSYNC_S_NONE;
+ s->qid = pd->act.qid;
+ s->pqid = pd->act.pqid;
+ s->pdnpipe = pd->act.pdnpipe;
+ s->dnpipe = pd->act.dnpipe;
+ s->state_flags |= pd->act.flags;
if (nr != NULL)
s->log |= nr->log & PF_LOG_ALL;
switch (pd->proto) {
@@ -3849,6 +3986,9 @@ pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
r = r->skip[PF_SKIP_DST_ADDR].ptr;
else if (r->tos && !(r->tos == pd->tos))
r = TAILQ_NEXT(r, entries);
+ else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
+ !(r->tos == (pd->tos & DSCP_MASK)))
+ r = TAILQ_NEXT(r, entries);
else if (r->os_fingerprint != PF_OSFP_ANY)
r = TAILQ_NEXT(r, entries);
else if (pd->proto == IPPROTO_UDP &&
@@ -3872,10 +4012,20 @@ pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
r = TAILQ_NEXT(r, entries);
else {
if (r->anchor == NULL) {
- match = 1;
- *rm = r;
- *am = a;
- *rsm = ruleset;
+ if (r->action == PF_MATCH) {
+ r->packets[direction == PF_OUT]++;
+ r->bytes[direction == PF_OUT] += pd->tot_len;
+ pf_rule_to_actions(r, &pd->act);
+ if (r->log)
+ PFLOG_PACKET(kif, m, af,
+ direction, PFRES_MATCH, r,
+ a, ruleset, pd, 1);
+ } else {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ }
if ((*rm)->quick)
break;
r = TAILQ_NEXT(r, entries);
@@ -3894,6 +4044,9 @@ pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
REASON_SET(&reason, PFRES_MATCH);
+ /* apply actions for last matching pass/block rule */
+ pf_rule_to_actions(r, &pd->act);
+
if (r->log)
PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
1);
@@ -5151,7 +5304,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
static int
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
- struct mbuf *m, struct pf_pdesc *pd)
+ struct mbuf *m, int off, struct pf_pdesc *pd)
{
struct pf_state_peer *src, *dst;
struct pf_state_key_cmp key;
@@ -5487,6 +5640,12 @@ pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
ip = mtod(m0, struct ip *);
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
+ if (s)
+ PF_STATE_UNLOCK(s);
+ return;
+ }
+
bzero(&dst, sizeof(dst));
dst.sin_family = AF_INET;
dst.sin_len = sizeof(dst);
@@ -5531,7 +5690,72 @@ pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
if (ifp == NULL)
goto bad;
- if (oifp != ifp) {
+ else if (r->rt == PF_REPLYTO || (r->rt == PF_ROUTETO && ifp->if_type == IFT_ENC)) {
+ /* XXX: Copied from ifaof_ifpforaddr(), which will mostly not return NULL. */
+ struct sockaddr_in inaddr;
+ struct sockaddr *addr;
+ struct ifaddr *ifa;
+ char *cp, *cp2, *cp3;
+ char *cplim;
+
+ inaddr.sin_addr = ip->ip_dst;
+ inaddr.sin_family = AF_INET;
+ inaddr.sin_len = sizeof(inaddr);
+ inaddr.sin_port = 0;
+ addr = (struct sockaddr *)&inaddr;
+
+ IF_ADDR_RLOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ if (ifa->ifa_netmask == 0) {
+ if ((bcmp(addr, ifa->ifa_addr, addr->sa_len) == 0) ||
+ (ifa->ifa_dstaddr &&
+ (bcmp(addr, ifa->ifa_dstaddr, addr->sa_len) == 0))) {
+ IF_ADDR_RUNLOCK(ifp);
+ return;
+ }
+ continue;
+ }
+ if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (bcmp(addr, ifa->ifa_dstaddr, addr->sa_len) == 0) {
+ IF_ADDR_RUNLOCK(ifp);
+ return;
+ }
+ } else {
+ cp = addr->sa_data;
+ cp2 = ifa->ifa_addr->sa_data;
+ cp3 = ifa->ifa_netmask->sa_data;
+ cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
+ for (; cp3 < cplim; cp3++)
+ if ((*cp++ ^ *cp2++) & *cp3)
+ break;
+ if (cp3 == cplim) {
+ IF_ADDR_RUNLOCK(ifp);
+ return;
+ }
+ }
+ }
+ IF_ADDR_RUNLOCK(ifp);
+ }
+ else if (r->rt == PF_ROUTETO && r->direction == dir && in_localip(ip->ip_dst))
+ return;
+
+ if (s != NULL && r->rt == PF_REPLYTO) {
+ /*
+ * Send it out since it came from the ifp (rt_addr) recorded in the state.
+ * The routing table lookup might not have chosen the correct interface!
+ */
+ } else if (oifp != ifp) {
+ if (in_broadcast(ip->ip_dst, oifp)) /* XXX: LOCKING of address list?! */
+ return;
+
+ if (s && r->rt == PF_ROUTETO && pd->nat_rule != NULL &&
+ r->direction == PF_OUT && r->direction == dir &&
+ pd->pf_mtag->routed < 2) {
+ PACKET_UNDO_NAT(m0, pd, ntohs(ip->ip_off), s, dir);
+ }
+
if (pf_test(PF_OUT, 0, ifp, &m0, NULL) != PF_PASS)
goto bad;
else if (m0 == NULL)
@@ -5584,6 +5808,9 @@ pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
error = EMSGSIZE;
KMOD_IPSTAT_INC(ips_cantfrag);
if (r->rt != PF_DUPTO) {
+ if (s && pd->nat_rule != NULL)
+ PACKET_UNDO_NAT(m0, pd, ntohs(ip->ip_off), s, dir);
+
icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
ifp->if_mtu);
goto done;
@@ -5663,6 +5890,12 @@ pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
ip6 = mtod(m0, struct ip6_hdr *);
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
+ if (s)
+ PF_STATE_UNLOCK(s);
+ return;
+ }
+
bzero(&dst, sizeof(dst));
dst.sin6_family = AF_INET6;
dst.sin6_len = sizeof(dst);
@@ -5702,8 +5935,70 @@ pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
if (ifp == NULL)
goto bad;
+ else if (r->rt == PF_REPLYTO) {
+ /* XXX: Copied from ifaof_ifpforaddr(), which will mostly not return NULL. */
+ struct sockaddr_in6 inaddr6;
+ struct sockaddr *addr;
+ struct ifaddr *ifa;
+ char *cp, *cp2, *cp3;
+ char *cplim;
+
+ inaddr6.sin6_addr = ip6->ip6_dst;
+ inaddr6.sin6_family = AF_INET6;
+ inaddr6.sin6_len = sizeof(inaddr6);
+ inaddr6.sin6_port = 0;
+ inaddr6.sin6_flowinfo = 0;
+ addr = (struct sockaddr *)&inaddr6;
+
+ IF_ADDR_RLOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ if (ifa->ifa_netmask == 0) {
+ if ((bcmp(addr, ifa->ifa_addr, addr->sa_len) == 0) ||
+ (ifa->ifa_dstaddr &&
+ (bcmp(addr, ifa->ifa_dstaddr, addr->sa_len) == 0))) {
+ IF_ADDR_RUNLOCK(ifp);
+ return;
+ }
+ continue;
+ }
+ if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (bcmp(addr, ifa->ifa_dstaddr, addr->sa_len) == 0) {
+ IF_ADDR_RUNLOCK(ifp);
+ return;
+ }
+ } else {
+ cp = addr->sa_data;
+ cp2 = ifa->ifa_addr->sa_data;
+ cp3 = ifa->ifa_netmask->sa_data;
+ cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
+ for (; cp3 < cplim; cp3++)
+ if ((*cp++ ^ *cp2++) & *cp3)
+ break;
+ if (cp3 == cplim) {
+ IF_ADDR_RUNLOCK(ifp);
+ return;
+ }
+ }
+ }
+ IF_ADDR_RUNLOCK(ifp);
+ } else if (r->rt == PF_ROUTETO && r->direction == dir && in6_localaddr(&ip6->ip6_dst))
+ return;
+
+ if (s != NULL && r->rt == PF_REPLYTO) {
+ /*
+ * Send it out since it came from the ifp (rt_addr) recorded in the state.
+ * The routing table lookup might not have chosen the correct interface!
+ */
+ } else if (oifp != ifp) {
+ if (s && r->rt == PF_ROUTETO && pd->nat_rule != NULL &&
+ r->direction == PF_OUT && r->direction == dir &&
+ pd->pf_mtag->routed < 2) {
+ int ip_off = ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr);
+ PACKET_UNDO_NAT(m0, pd, ip_off, s, dir);
+ }
- if (oifp != ifp) {
if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, NULL) != PF_PASS)
goto bad;
else if (m0 == NULL)
@@ -5737,9 +6032,12 @@ pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
else {
in6_ifstat_inc(ifp, ifs6_in_toobig);
- if (r->rt != PF_DUPTO)
+ if (r->rt != PF_DUPTO) {
+ if (s && pd->nat_rule != NULL)
+ PACKET_UNDO_NAT(m0, pd, ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr), s, dir);
+
icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
- else
+ } else
goto bad;
}
@@ -5905,7 +6203,8 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
struct pf_state *s = NULL;
struct pf_ruleset *ruleset = NULL;
struct pf_pdesc pd;
- int off, dirndx, pqid = 0;
+ int off = 0, dirndx, pqid = 0;
+ struct ip_fw_args dnflow;
M_ASSERTPKTHDR(m);
@@ -5931,22 +6230,19 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
PF_RULES_RLOCK();
- if (ip_divert_ptr != NULL &&
+ if ((ip_divert_ptr != NULL || ip_dn_io_ptr != NULL) &&
((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
- struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
- if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
- if (pd.pf_mtag == NULL &&
- ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
- action = PF_DROP;
- goto done;
- }
- pd.pf_mtag->flags |= PF_PACKET_LOOPED;
- m_tag_delete(m, ipfwtag);
+ if (pd.pf_mtag == NULL &&
+ ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
+ action = PF_DROP;
+ goto done;
}
+ pd.pf_mtag->flags |= PF_PACKET_LOOPED;
if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
m->m_flags |= M_FASTFWD_OURS;
pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
}
+ m_tag_delete(m, ipfwtag);
} else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
/* We do IP header normalization and packet reassembly here */
action = PF_DROP;
@@ -5973,7 +6269,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
pd.sidx = (dir == PF_IN) ? 0 : 1;
pd.didx = (dir == PF_IN) ? 1 : 0;
pd.af = AF_INET;
- pd.tos = h->ip_tos;
+ pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
pd.tot_len = ntohs(h->ip_len);
/* handle fragments that didn't get reassembled by normalization */
@@ -5994,6 +6290,9 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
log = action != PF_PASS;
goto done;
}
+ dnflow.f_id._flags = th.th_flags;
+ dnflow.f_id.dst_port = ntohs(th.th_dport);
+ dnflow.f_id.src_port = ntohs(th.th_sport);
pd.p_len = pd.tot_len - off - (th.th_off << 2);
if ((th.th_flags & TH_ACK) && pd.p_len == 0)
pqid = 1;
@@ -6003,6 +6302,20 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
&reason);
if (action == PF_PASS) {
+ if (dir == PF_IN && s != NULL &&
+ s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->action == PF_NAT) {
+ dnflow.f_id.dst_port =
+ ntohs(s->key[(s->direction == PF_IN)]->
+ port[(s->direction == PF_OUT)]);
+ }
+ if (dir == PF_OUT && s != NULL &&
+ s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->action != PF_NAT) {
+ dnflow.f_id.src_port =
+ ntohs(s->key[(s->direction == PF_OUT)]->
+ port[(s->direction == PF_IN)]);
+ }
if (pfsync_update_state_ptr != NULL)
pfsync_update_state_ptr(s);
r = s->rule.ptr;
@@ -6023,6 +6336,8 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
log = action != PF_PASS;
goto done;
}
+ dnflow.f_id.dst_port = ntohs(uh.uh_dport);
+ dnflow.f_id.src_port = ntohs(uh.uh_sport);
if (uh.uh_dport == 0 ||
ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
@@ -6032,6 +6347,20 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
}
action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
if (action == PF_PASS) {
+ if (dir == PF_IN && s != NULL &&
+ s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->action == PF_NAT) {
+ dnflow.f_id.dst_port =
+ ntohs(s->key[(s->direction == PF_IN)]->
+ port[(s->direction == PF_OUT)]);
+ }
+ if (dir == PF_OUT && s != NULL &&
+ s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->action != PF_NAT) {
+ dnflow.f_id.src_port =
+ ntohs(s->key[(s->direction == PF_OUT)]->
+ port[(s->direction == PF_IN)]);
+ }
if (pfsync_update_state_ptr != NULL)
pfsync_update_state_ptr(s);
r = s->rule.ptr;
@@ -6076,7 +6405,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
#endif
default:
- action = pf_test_state_other(&s, dir, kif, m, &pd);
+ action = pf_test_state_other(&s, dir, kif, m, off, &pd);
if (action == PF_PASS) {
if (pfsync_update_state_ptr != NULL)
pfsync_update_state_ptr(s);
@@ -6120,7 +6449,14 @@ done:
}
#ifdef ALTQ
- if (action == PF_PASS && r->qid) {
+ if (s && s->qid) {
+ pd.act.pqid = s->pqid;
+ pd.act.qid = s->qid;
+ } else if (r->qid) {
+ pd.act.pqid = r->pqid;
+ pd.act.qid = r->qid;
+ }
+ if (action == PF_PASS && pd.act.qid) {
if (pd.pf_mtag == NULL &&
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
action = PF_DROP;
@@ -6129,9 +6465,9 @@ done:
if (s != NULL)
pd.pf_mtag->qid_hash = pf_state_hash(s);
if (pqid || (pd.tos & IPTOS_LOWDELAY))
- pd.pf_mtag->qid = r->pqid;
+ pd.pf_mtag->qid = pd.act.pqid;
else
- pd.pf_mtag->qid = r->qid;
+ pd.pf_mtag->qid = pd.act.qid;
/* Add hints for ecn. */
pd.pf_mtag->hdr = h;
}
@@ -6139,8 +6475,78 @@ done:
}
#endif /* ALTQ */
+ if (pd.pf_mtag == NULL &&
+ ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_MEMORY);
+ }
+ if (s && (s->dnpipe || s->pdnpipe)) {
+ pd.act.dnpipe = s->dnpipe;
+ pd.act.pdnpipe = s->pdnpipe;
+ pd.act.flags = s->state_flags;
+ } else if (r->dnpipe || r->pdnpipe) {
+ pd.act.dnpipe = r->dnpipe;
+ pd.act.pdnpipe = r->pdnpipe;
+ pd.act.flags = r->free_flags;
+ }
+ if ((pd.act.dnpipe || pd.act.pdnpipe) && ip_dn_io_ptr == NULL) {
+ /* XXX: ipfw has the same behaviour! */
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_MEMORY);
+ } else if (action == PF_PASS &&
+ (pd.act.dnpipe || pd.act.pdnpipe) && !PACKET_LOOPED(&pd)) {
+ if (dir != r->direction && pd.act.pdnpipe) {
+ dnflow.rule.info = pd.act.pdnpipe;
+ } else if (dir == r->direction) {
+ dnflow.rule.info = pd.act.dnpipe;
+ } else
+ goto continueprocessing;
+
+ if (pd.act.flags & PFRULE_DN_IS_PIPE)
+ dnflow.rule.info |= IPFW_IS_PIPE;
+ dnflow.f_id.addr_type = 4; /* IPv4 type */
+ dnflow.f_id.proto = pd.proto;
+ if (dir == PF_OUT && s != NULL && s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->action == PF_NAT)
+ dnflow.f_id.src_ip =
+ ntohl(s->key[(s->direction == PF_IN)]->
+ addr[(s->direction == PF_OUT)].v4.s_addr);
+ else
+ dnflow.f_id.src_ip = ntohl(h->ip_src.s_addr);
+ if (dir == PF_IN && s != NULL && s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->action != PF_NAT)
+ dnflow.f_id.dst_ip =
+ ntohl(s->key[(s->direction == PF_OUT)]->
+ addr[(s->direction == PF_IN)].v4.s_addr);
+ else
+ dnflow.f_id.dst_ip = ntohl(h->ip_dst.s_addr);
+ dnflow.f_id.extra = dnflow.rule.info;
+
+ if (m->m_flags & M_FASTFWD_OURS) {
+ pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT;
+ m->m_flags &= ~M_FASTFWD_OURS;
+ }
+
+ if (s != NULL && s->nat_rule.ptr)
+ PACKET_UNDO_NAT(m, &pd, off, s, dir);
+
+ ip_dn_io_ptr(m0, (dir == PF_IN) ? DIR_IN : DIR_OUT, &dnflow);
+ if (*m0 == NULL) {
+ if (s)
+ PF_STATE_UNLOCK(s);
+ return (action);
+ }
+ /* This is dummynet fast io processing */
+ ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
+ if (ipfwtag != NULL)
+ m_tag_delete(*m0, ipfwtag);
+ if (s != NULL && s->nat_rule.ptr)
+ PACKET_REDO_NAT(m, &pd, off, s, dir);
+ }
+continueprocessing:
+
/*
- * connections redirected to loopback should not match sockets
+ * Connections redirected to loopback should match sockets
* bound specifically to loopback due to security implications,
* see tcp_input() and in_pcblookup_listen().
*/
@@ -6149,7 +6555,7 @@ done:
(s->nat_rule.ptr->action == PF_RDR ||
s->nat_rule.ptr->action == PF_BINAT) &&
(ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
- m->m_flags |= M_SKIP_FIREWALL;
+ m->m_flags |= M_FASTFWD_OURS;
if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
!PACKET_LOOPED(&pd)) {
@@ -6193,6 +6599,9 @@ done:
}
}
+ if (PACKET_LOOPED(&pd))
+ pd.pf_mtag->flags &= ~PF_PACKET_LOOPED;
+
if (log) {
struct pf_rule *lr;
@@ -6293,6 +6702,8 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
struct pf_ruleset *ruleset = NULL;
struct pf_pdesc pd;
int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
+ struct m_tag *dn_tag;
+ struct ip_fw_args dnflow;
M_ASSERTPKTHDR(m);
@@ -6319,8 +6730,22 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
PF_RULES_RLOCK();
+ if (ip_dn_io_ptr != NULL &&
+ ((dn_tag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
+ if (pd.pf_mtag == NULL &&
+ ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
+ action = PF_DROP;
+ goto done;
+ }
+ pd.pf_mtag->flags |= PF_PACKET_LOOPED;
+ if (pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
+ m->m_flags |= M_FASTFWD_OURS;
+ pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
+ }
+ m_tag_delete(m, dn_tag);
+ }
/* We do IP header normalization and packet reassembly here */
- if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
+ else if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
action = PF_DROP;
goto done;
}
@@ -6434,6 +6859,9 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
log = action != PF_PASS;
goto done;
}
+ dnflow.f_id._flags = th.th_flags;
+ dnflow.f_id.dst_port = th.th_dport;
+ dnflow.f_id.src_port = th.th_sport;
pd.p_len = pd.tot_len - off - (th.th_off << 2);
action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
if (action == PF_DROP)
@@ -6461,6 +6889,8 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
log = action != PF_PASS;
goto done;
}
+ dnflow.f_id.dst_port = uh.uh_dport;
+ dnflow.f_id.src_port = uh.uh_sport;
if (uh.uh_dport == 0 ||
ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
@@ -6512,7 +6942,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
}
default:
- action = pf_test_state_other(&s, dir, kif, m, &pd);
+ action = pf_test_state_other(&s, dir, kif, m, off, &pd);
if (action == PF_PASS) {
if (pfsync_update_state_ptr != NULL)
pfsync_update_state_ptr(s);
@@ -6562,7 +6992,14 @@ done:
}
#ifdef ALTQ
- if (action == PF_PASS && r->qid) {
+ if (s && s->qid) {
+ pd.act.pqid = s->pqid;
+ pd.act.qid = s->qid;
+ } else if (r->qid) {
+ pd.act.pqid = r->pqid;
+ pd.act.qid = r->qid;
+ }
+ if (action == PF_PASS && pd.act.qid) {
if (pd.pf_mtag == NULL &&
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
action = PF_DROP;
@@ -6571,26 +7008,88 @@ done:
if (s != NULL)
pd.pf_mtag->qid_hash = pf_state_hash(s);
if (pd.tos & IPTOS_LOWDELAY)
- pd.pf_mtag->qid = r->pqid;
+ pd.pf_mtag->qid = pd.act.pqid;
else
- pd.pf_mtag->qid = r->qid;
+ pd.pf_mtag->qid = pd.act.qid;
/* Add hints for ecn. */
pd.pf_mtag->hdr = h;
}
}
#endif /* ALTQ */
+ if (pd.pf_mtag == NULL &&
+ ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_MEMORY);
+ }
+ if (s && (s->dnpipe || s->pdnpipe)) {
+ pd.act.dnpipe = s->dnpipe;
+ pd.act.pdnpipe = s->pdnpipe;
+ pd.act.flags = s->state_flags;
+ } else if (r->dnpipe || r->pdnpipe) {
+ pd.act.dnpipe = r->dnpipe;
+ pd.act.pdnpipe = r->pdnpipe;
+ pd.act.flags = r->free_flags;
+ }
+ if ((pd.act.dnpipe || pd.act.pdnpipe) && ip_dn_io_ptr == NULL) {
+ /* XXX: ipfw has the same behaviour! */
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_MEMORY);
+ } else if (action == PF_PASS &&
+ (pd.act.dnpipe || pd.act.pdnpipe) && !PACKET_LOOPED(&pd)) {
+ if (dir != r->direction && pd.act.pdnpipe) {
+ dnflow.rule.info = pd.act.pdnpipe;
+ } else if (dir == r->direction && pd.act.dnpipe) {
+ dnflow.rule.info = pd.act.dnpipe;
+ } else
+ goto continueprocessing6;
+
+ if (pd.act.flags & PFRULE_DN_IS_PIPE)
+ dnflow.rule.info |= IPFW_IS_PIPE;
+ dnflow.f_id.addr_type = 6; /* IPv6 type */
+ dnflow.f_id.proto = pd.proto;
+ dnflow.f_id.src_ip = 0;
+ dnflow.f_id.dst_ip = 0;
+ if (dir == PF_OUT && s != NULL && s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->action == PF_NAT)
+ dnflow.f_id.src_ip6 = s->key[(s->direction == PF_IN)]->addr[0].v6;
+ else
+ dnflow.f_id.src_ip6 = h->ip6_src;
+ dnflow.f_id.dst_ip6 = h->ip6_dst;
+
+ if (s != NULL && s->nat_rule.ptr)
+ PACKET_UNDO_NAT(m, &pd, off, s, dir);
+
+ ip_dn_io_ptr(m0,
+ ((dir == PF_IN) ? DIR_IN : DIR_OUT) | PROTO_IPV6, &dnflow);
+ if (*m0 == NULL) {
+ if (s)
+ PF_STATE_UNLOCK(s);
+ return (action);
+ }
+ /* This is dummynet fast io processing */
+ dn_tag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
+ if (dn_tag != NULL)
+ m_tag_delete(*m0, dn_tag);
+ if (s != NULL && s->nat_rule.ptr)
+ PACKET_REDO_NAT(m, &pd, off, s, dir);
+ }
+continueprocessing6:
+
if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
(s->nat_rule.ptr->action == PF_RDR ||
s->nat_rule.ptr->action == PF_BINAT) &&
IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
- m->m_flags |= M_SKIP_FIREWALL;
+ m->m_flags |= M_FASTFWD_OURS;
/* XXX: Anybody working on it?! */
if (r->divert.port)
printf("pf: divert(9) is not supported for IPv6\n");
+ if (PACKET_LOOPED(&pd))
+ pd.pf_mtag->flags &= ~PF_PACKET_LOOPED;
+
if (log) {
struct pf_rule *lr;
diff --git a/sys/netpfil/pf/pf.h b/sys/netpfil/pf/pf.h
index 96f638e..c10195f 100644
--- a/sys/netpfil/pf/pf.h
+++ b/sys/netpfil/pf/pf.h
@@ -45,7 +45,8 @@
enum { PF_INOUT, PF_IN, PF_OUT };
enum { PF_PASS, PF_DROP, PF_SCRUB, PF_NOSCRUB, PF_NAT, PF_NONAT,
- PF_BINAT, PF_NOBINAT, PF_RDR, PF_NORDR, PF_SYNPROXY_DROP, PF_DEFER };
+ PF_BINAT, PF_NOBINAT, PF_RDR, PF_NORDR, PF_SYNPROXY_DROP, PF_DEFER,
+ PF_MATCH };
enum { PF_RULESET_SCRUB, PF_RULESET_FILTER, PF_RULESET_NAT,
PF_RULESET_BINAT, PF_RULESET_RDR, PF_RULESET_MAX };
enum { PF_OP_NONE, PF_OP_IRG, PF_OP_EQ, PF_OP_NE, PF_OP_LT,
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index bcab655..a3da0e8 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -200,9 +200,11 @@ VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active VNET(pf_vnet_active)
int pf_end_threads;
+struct proc *pf_purge_proc;
struct rwlock pf_rules_lock;
struct sx pf_ioctl_lock;
+struct sx pf_end_lock;
/* pfsync */
pfsync_state_import_t *pfsync_state_import_ptr = NULL;
@@ -1170,7 +1172,9 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
rule->states_cur = counter_u64_alloc(M_WAITOK);
rule->states_tot = counter_u64_alloc(M_WAITOK);
rule->src_nodes = counter_u64_alloc(M_WAITOK);
+#ifdef PF_USER_INFO
rule->cuid = td->td_ucred->cr_ruid;
+#endif
rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
TAILQ_INIT(&rule->rpool.list);
@@ -1196,7 +1200,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
V_ticket_pabuf));
ERROUT(EBUSY);
}
-
tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
pf_rulequeue);
if (tail)
@@ -1280,8 +1283,29 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
}
rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
+#ifndef PF_USER_INFO
+ if (rule->cuid) {
+ tail = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
+ while ((tail != NULL) && (tail->cuid != rule->cuid))
+ tail = TAILQ_NEXT(tail, entries);
+ if (tail != NULL) {
+ rule->evaluations = tail->evaluations;
+ rule->packets[0] = tail->packets[0];
+ rule->packets[1] = tail->packets[1];
+ rule->bytes[0] = tail->bytes[0];
+ rule->bytes[1] = tail->bytes[1];
+ } else {
+ rule->evaluations = rule->packets[0] = rule->packets[1] =
+ rule->bytes[0] = rule->bytes[1] = 0;
+ }
+ } else {
+ rule->evaluations = rule->packets[0] = rule->packets[1] =
+ rule->bytes[0] = rule->bytes[1] = 0;
+ }
+#else
rule->evaluations = rule->packets[0] = rule->packets[1] =
rule->bytes[0] = rule->bytes[1] = 0;
+#endif
TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
rule, entries);
ruleset->rules[rs_num].inactive.rcount++;
@@ -1431,7 +1455,9 @@ DIOCADDRULE_error:
newrule->states_cur = counter_u64_alloc(M_WAITOK);
newrule->states_tot = counter_u64_alloc(M_WAITOK);
newrule->src_nodes = counter_u64_alloc(M_WAITOK);
+#ifdef PF_USER_INFO
newrule->cuid = td->td_ucred->cr_ruid;
+#endif
newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
TAILQ_INIT(&newrule->rpool.list);
}
@@ -1719,6 +1745,30 @@ relock_DIOCKILLSTATES:
break;
}
+ case DIOCKILLSCHEDULE: {
+ struct pf_state *state;
+ struct pfioc_schedule_kill *psk = (struct pfioc_schedule_kill *)addr;
+ int killed = 0;
+ u_int i;
+
+ for (i = 0; i <= pf_hashmask; i++) {
+ struct pf_idhash *ih = &V_pf_idhash[i];
+
+relock_DIOCKILLSCHEDULE:
+ PF_HASHROW_LOCK(ih);
+ LIST_FOREACH(state, &ih->states, entry) {
+ if (!strcmp(psk->schedule, state->rule.ptr->schedule)) {
+ pf_unlink_state(state, PF_ENTER_LOCKED);
+ killed++;
+ goto relock_DIOCKILLSCHEDULE;
+ }
+ }
+ PF_HASHROW_UNLOCK(ih);
+ }
+ psk->numberkilled = killed;
+ break;
+ }
+
case DIOCADDSTATE: {
struct pfioc_state *ps = (struct pfioc_state *)addr;
struct pfsync_state *sp = &ps->state;
@@ -3830,8 +3880,8 @@ hook_pf(void)
pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
if (pfh_inet == NULL)
return (ESRCH); /* XXX */
- pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
- pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
+ pfil_add_named_hook_flags(pf_check_in, NULL, "pf", PFIL_IN | PFIL_WAITOK, pfh_inet);
+ pfil_add_named_hook_flags(pf_check_out, NULL, "pf", PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
@@ -3844,8 +3894,10 @@ hook_pf(void)
#endif
return (ESRCH); /* XXX */
}
- pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
- pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
+ pfil_add_named_hook_flags(pf_check6_in, NULL, "pf", PFIL_IN | PFIL_WAITOK,
+ pfh_inet6);
+ pfil_add_named_hook_flags(pf_check6_out, NULL, "pf", PFIL_OUT | PFIL_WAITOK,
+ pfh_inet6);
#endif
V_pf_pfil_hooked = 1;
@@ -3914,6 +3966,7 @@ pf_load(void)
rw_init(&pf_rules_lock, "pf rulesets");
sx_init(&pf_ioctl_lock, "pf ioctl");
+ sx_init(&pf_end_lock, "pf end thread");
pf_mtag_initialize();
@@ -3922,7 +3975,7 @@ pf_load(void)
return (ENOMEM);
pf_end_threads = 0;
- error = kproc_create(pf_purge_thread, NULL, NULL, 0, 0, "pf purge");
+ error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
if (error != 0)
return (error);
@@ -3950,12 +4003,12 @@ pf_unload_vnet(void)
return;
}
- pf_unload_vnet_purge();
-
PF_RULES_WLOCK();
shutdown_pf();
PF_RULES_WUNLOCK();
+ pf_unload_vnet_purge();
+
pf_normalize_cleanup();
PF_RULES_WLOCK();
pfi_cleanup_vnet();
@@ -3972,11 +4025,13 @@ pf_unload(void)
{
int error = 0;
+ sx_xlock(&pf_end_lock);
pf_end_threads = 1;
while (pf_end_threads < 2) {
wakeup_one(pf_purge_thread);
- rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
+ sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
}
+ sx_xunlock(&pf_end_lock);
if (pf_dev != NULL)
destroy_dev(pf_dev);
@@ -3985,6 +4040,7 @@ pf_unload(void)
rw_destroy(&pf_rules_lock);
sx_destroy(&pf_ioctl_lock);
+ sx_destroy(&pf_end_lock);
return (error);
}
diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c
index 612dff6..ca6f02b 100644
--- a/sys/netpfil/pf/pf_norm.c
+++ b/sys/netpfil/pf/pf_norm.c
@@ -90,8 +90,10 @@ struct pf_fragment {
TAILQ_ENTRY(pf_fragment) frag_next;
uint32_t fr_timeout;
uint16_t fr_maxlen; /* maximum length of single fragment */
+ uint16_t fr_entries; /* Total number of pf_fragment entries */
TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};
+#define PF_MAX_FRENT_PER_FRAGMENT 64
struct pf_fragment_tag {
uint16_t ft_hdrlen; /* header length of reassembled pkt */
@@ -376,6 +378,7 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
*(struct pf_fragment_cmp *)frag = *key;
frag->fr_timeout = time_uptime;
frag->fr_maxlen = frent->fe_len;
+ frag->fr_entries = 0;
TAILQ_INIT(&frag->fr_queue);
RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
@@ -387,6 +390,9 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
return (frag);
}
+ if (frag->fr_entries >= PF_MAX_FRENT_PER_FRAGMENT)
+ goto bad_fragment;
+
KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));
/* Remember maximum fragment len for refragmentation. */
@@ -459,6 +465,8 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
else
TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
+ frag->fr_entries++;
+
return (frag);
bad_fragment:
@@ -789,6 +797,7 @@ pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
for (t = m; m; m = t) {
t = m->m_nextpkt;
m->m_nextpkt = NULL;
+ m->m_pkthdr.rcvif = ifp;
m->m_flags |= M_SKIP_FIREWALL;
memset(&pd, 0, sizeof(pd));
pd.pf_mtag = pf_find_mtag(m);
@@ -1815,7 +1824,7 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
u_int16_t ov, nv;
ov = *(u_int16_t *)h;
- h->ip_tos = tos;
+ h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
nv = *(u_int16_t *)h;
h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
diff --git a/sys/netpfil/pf/pf_ruleset.c b/sys/netpfil/pf/pf_ruleset.c
index 61da586..5bb3be6 100644
--- a/sys/netpfil/pf/pf_ruleset.c
+++ b/sys/netpfil/pf/pf_ruleset.c
@@ -121,6 +121,7 @@ pf_get_ruleset_number(u_int8_t action)
return (PF_RULESET_SCRUB);
break;
case PF_PASS:
+ case PF_MATCH:
case PF_DROP:
return (PF_RULESET_FILTER);
break;