path: root/net/netfilter/nf_flow_table_core.c
author     Felix Fietkau <nbd@nbd.name>             2018-02-26 10:15:21 +0100
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2018-04-24 10:28:54 +0200
commit     59c466dd68e796f3a7a0709d90c72ce2d84e29c2 (patch)
tree       0c0222d84d3a45a07d09416fe97ace13111b0f5b /net/netfilter/nf_flow_table_core.c
parent     6bdc3c68d94c5d6adc675ee55361962e9dd2489d (diff)
netfilter: nf_flow_table: add a new flow state for tearing down offloading
On cleanup, this will be treated differently from FLOW_OFFLOAD_DYING:

If FLOW_OFFLOAD_DYING is set, the connection is going away, so both the
offload state and the connection tracking entry will be deleted.

If FLOW_OFFLOAD_TEARDOWN is set, the connection remains alive, but the
offload state is torn down. This is useful for cases that require more
complex state tracking / timeout handling on TCP, or if the connection
has been idle for too long.

Support for sending flows back to the slow path will be implemented in
a following patch.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
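For illustration, here is a minimal sketch of how a caller might choose between the two states described above. The helper example_release_flow() is hypothetical; flow_offload_teardown() and FLOW_OFFLOAD_TEARDOWN are introduced by this patch, while flow_offload_dead() and FLOW_OFFLOAD_DYING already exist.

/* Illustrative sketch, not part of this patch: pick a cleanup state
 * for a flow depending on whether the connection itself is going away.
 */
static void example_release_flow(struct flow_offload *flow, bool conn_closed)
{
	if (conn_closed)
		/* FLOW_OFFLOAD_DYING: gc deletes the offload state
		 * and the conntrack entry
		 */
		flow_offload_dead(flow);
	else
		/* FLOW_OFFLOAD_TEARDOWN: gc deletes only the offload
		 * state; the connection stays alive in conntrack
		 */
		flow_offload_teardown(flow);
}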
Diffstat (limited to 'net/netfilter/nf_flow_table_core.c')
-rw-r--r--  net/netfilter/nf_flow_table_core.c  22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 0d38f20..5a81e4f 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -174,6 +174,12 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 	flow_offload_free(flow);
 }
 
+void flow_offload_teardown(struct flow_offload *flow)
+{
+	flow->flags |= FLOW_OFFLOAD_TEARDOWN;
+}
+EXPORT_SYMBOL_GPL(flow_offload_teardown);
+
 struct flow_offload_tuple_rhash *
 flow_offload_lookup(struct nf_flowtable *flow_table,
 		    struct flow_offload_tuple *tuple)
@@ -226,11 +232,6 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
 	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
 }
 
-static inline bool nf_flow_is_dying(const struct flow_offload *flow)
-{
-	return flow->flags & FLOW_OFFLOAD_DYING;
-}
-
 static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
 {
 	struct flow_offload_tuple_rhash *tuplehash;
@@ -258,7 +259,8 @@ static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
 		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
 
 		if (nf_flow_has_expired(flow) ||
-		    nf_flow_is_dying(flow))
+		    (flow->flags & (FLOW_OFFLOAD_DYING |
+				    FLOW_OFFLOAD_TEARDOWN)))
 			flow_offload_del(flow_table, flow);
 	}
 out:
@@ -419,10 +421,14 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 {
 	struct net_device *dev = data;
 
-	if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
+	if (!dev) {
+		flow_offload_teardown(flow);
 		return;
+	}
 
-	flow_offload_dead(flow);
+	if (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
+	    flow->tuplehash[1].tuple.iifidx == dev->ifindex)
+		flow_offload_dead(flow);
 }
 
 static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
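Taken together, the gc hunk above means both terminal flags now funnel into the same removal path; the difference between them is only what happens to the conntrack entry afterwards, as described in the commit message. A condensed paraphrase of the patched removal check (example_flow_removable() is a hypothetical name; the condition is copied from the diff):

/* Paraphrase of the removal condition in nf_flow_offload_gc_step()
 * after this patch: expired flows and flows in either terminal state
 * are handed to flow_offload_del().
 */
static bool example_flow_removable(const struct flow_offload *flow)
{
	return nf_flow_has_expired(flow) ||
	       (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN));
}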