author     David S. Miller <davem@davemloft.net>  2014-09-26 16:21:29 -0400
committer  David S. Miller <davem@davemloft.net>  2014-09-26 16:21:29 -0400
commit     e7af85db54430c7cb7e15de9b3e0f72074d94dfb (patch)
tree       4827f6435c305b95ae7a1b46d3a8ab230291d25a
parent     445f7f4d62628cb2971db884084d162ecb622ec7 (diff)
parent     679ab4ddbdfab8af39104e63819db71f428aefd9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:
====================
nf pull request for net
This series contains netfilter fixes for net, they are:
1) Fix lockdep splat in nft_hash when releasing sets from the
rcu_callback context. We no longer hold the mutex there.
2) Remove an unnecessary spin_lock_bh in the destroy path of the nf_tables
rbtree set type, which also runs from rcu_callback context.
3) Fix another lockdep splat in rhashtable. None of the callers hold
a mutex when calling rhashtable_destroy.
4) Fix duplicated error reporting from nfnetlink when aborting and
replaying a batch.
5) Fix a Kconfig issue reported by kbuild robot.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
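
Background for items 1 and 3: rht_dereference() is a lockdep-annotated accessor (it wraps rcu_dereference_protected() with a check that the hash table mutex is held), so calling it from rcu_callback context, where no caller can hold that mutex, triggers the splat; the fixes below read the already-unpublished table pointer directly instead. The following is a rough, self-contained userspace model of that situation, not kernel code; the struct layout and the mutex_is_held flag are invented purely for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for rht_dereference(): like the lockdep-checked
 * accessor, it insists that the protecting mutex is held when reading. */
#define rht_dereference(p, ht) (assert((ht)->mutex_is_held), (p))

struct bucket_table { int size; };

struct rhashtable {
	struct bucket_table *tbl;
	bool mutex_is_held;	/* models lockdep_rht_mutex_is_held() */
};

/* Old destroy path: re-checks the mutex, but destruction runs from an RCU
 * callback where nobody holds it, so the check fires (the lockdep splat). */
static void rhashtable_destroy_old(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	printf("freeing %d buckets\n", tbl->size);
}

/* New destroy path: the table is already unpublished and quiescent, so the
 * pointer is read directly, without any lock assertion. */
static void rhashtable_destroy_new(struct rhashtable *ht)
{
	printf("freeing %d buckets\n", ht->tbl->size);
}

int main(void)
{
	struct bucket_table tbl = { .size = 8 };
	struct rhashtable ht = { .tbl = &tbl, .mutex_is_held = false };

	rhashtable_destroy_new(&ht);	/* fine without the mutex */
	rhashtable_destroy_old(&ht);	/* aborts here, analogous to the splat */
	return 0;
}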
 lib/rhashtable.c           |  8
 net/netfilter/Kconfig      |  1
 net/netfilter/nfnetlink.c  | 64
 net/netfilter/nft_hash.c   | 12
 net/netfilter/nft_rbtree.c |  2
 5 files changed, 75 insertions, 12 deletions
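
Item 4 is addressed by queueing per-message errors while the batch is processed and only acking them to userspace once the batch has completed, discarding anything queued before a replay. Below is a minimal userspace sketch of that pattern; it mirrors the new nfnl_err helpers added to nfnetlink.c in the diff, but uses a plain singly-linked list and printf in place of the kernel's list_head API and netlink_ack(), and the sequence numbers are invented for the example:

#include <stdio.h>
#include <stdlib.h>

/* One queued error: which message of the batch failed, and the error code. */
struct nfnl_err {
	struct nfnl_err *next;
	unsigned int seq;	/* stands in for the failing nlmsghdr */
	int err;
};

/* Queue an error instead of acking it immediately. */
static int nfnl_err_add(struct nfnl_err **list, unsigned int seq, int err)
{
	struct nfnl_err *e = malloc(sizeof(*e));
	struct nfnl_err **p = list;

	if (e == NULL)
		return -1;
	e->seq = seq;
	e->err = err;
	e->next = NULL;
	while (*p)		/* append at the tail to keep message order */
		p = &(*p)->next;
	*p = e;
	return 0;
}

/* Throw away everything queued so far, e.g. before replaying the batch. */
static void nfnl_err_reset(struct nfnl_err **list)
{
	while (*list) {
		struct nfnl_err *e = *list;

		*list = e->next;
		free(e);
	}
}

/* Deliver each queued error exactly once, after the whole batch is done. */
static void nfnl_err_deliver(struct nfnl_err **list)
{
	while (*list) {
		struct nfnl_err *e = *list;

		printf("ack: seq=%u err=%d\n", e->seq, e->err);
		*list = e->next;
		free(e);
	}
}

int main(void)
{
	struct nfnl_err *errs = NULL;

	nfnl_err_add(&errs, 2, -22);	/* first pass: message 2 fails */
	nfnl_err_reset(&errs);		/* batch must be replayed: forget it */
	nfnl_err_add(&errs, 2, -22);	/* replay hits the same error once */
	nfnl_err_deliver(&errs);	/* userspace sees it a single time */
	return 0;
}

Because delivery happens only after the final pass, a batch that is aborted and replayed (for example on -EAGAIN) no longer reports the same error twice.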
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7b36e4d..16d0263 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -588,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
  * rhashtable_destroy - destroy hash table
  * @ht: the hash table to destroy
  *
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
  */
 void rhashtable_destroy(const struct rhashtable *ht)
 {
-	const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
-	bucket_table_free(tbl);
+	bucket_table_free(ht->tbl);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index b5c1d3a..6d77cce 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -847,6 +847,7 @@ config NETFILTER_XT_TARGET_TPROXY
 	tristate '"TPROXY" target transparent proxying support'
 	depends on NETFILTER_XTABLES
 	depends on NETFILTER_ADVANCED
+	depends on (IPV6 || IPV6=n)
 	depends on IP_NF_MANGLE
 	select NF_DEFRAG_IPV4
 	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index c138b8f..f37f071 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -222,6 +222,51 @@ replay:
 	}
 }
 
+struct nfnl_err {
+	struct list_head head;
+	struct nlmsghdr *nlh;
+	int err;
+};
+
+static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
+{
+	struct nfnl_err *nfnl_err;
+
+	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
+	if (nfnl_err == NULL)
+		return -ENOMEM;
+
+	nfnl_err->nlh = nlh;
+	nfnl_err->err = err;
+	list_add_tail(&nfnl_err->head, list);
+
+	return 0;
+}
+
+static void nfnl_err_del(struct nfnl_err *nfnl_err)
+{
+	list_del(&nfnl_err->head);
+	kfree(nfnl_err);
+}
+
+static void nfnl_err_reset(struct list_head *err_list)
+{
+	struct nfnl_err *nfnl_err, *next;
+
+	list_for_each_entry_safe(nfnl_err, next, err_list, head)
+		nfnl_err_del(nfnl_err);
+}
+
+static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
+{
+	struct nfnl_err *nfnl_err, *next;
+
+	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
+		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
+		nfnl_err_del(nfnl_err);
+	}
+}
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 				u_int16_t subsys_id)
 {
@@ -230,6 +275,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	const struct nfnetlink_subsystem *ss;
 	const struct nfnl_callback *nc;
 	bool success = true, done = false;
+	static LIST_HEAD(err_list);
 	int err;
 
 	if (subsys_id >= NFNL_SUBSYS_COUNT)
@@ -287,6 +333,7 @@ replay:
 		type = nlh->nlmsg_type;
 		if (type == NFNL_MSG_BATCH_BEGIN) {
 			/* Malformed: Batch begin twice */
+			nfnl_err_reset(&err_list);
 			success = false;
 			goto done;
 		} else if (type == NFNL_MSG_BATCH_END) {
@@ -333,6 +380,7 @@ replay:
 			 * original skb.
 			 */
 			if (err == -EAGAIN) {
+				nfnl_err_reset(&err_list);
 				ss->abort(skb);
 				nfnl_unlock(subsys_id);
 				kfree_skb(nskb);
@@ -341,11 +389,24 @@ replay:
 			}
 ack:
 		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+			/* Errors are delivered once the full batch has been
+			 * processed, this avoids that the same error is
+			 * reported several times when replaying the batch.
+			 */
+			if (nfnl_err_add(&err_list, nlh, err) < 0) {
+				/* We failed to enqueue an error, reset the
+				 * list of errors and send OOM to userspace
+				 * pointing to the batch header.
+				 */
+				nfnl_err_reset(&err_list);
+				netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
+				success = false;
+				goto done;
+			}
 			/* We don't stop processing the batch on errors, thus,
 			 * userspace gets all the errors that the batch
 			 * triggers.
 			 */
-			netlink_ack(skb, nlh, err);
 			if (err)
 				success = false;
 		}
@@ -361,6 +422,7 @@ done:
 	else
 		ss->abort(skb);
 
+	nfnl_err_deliver(&err_list, oskb);
 	nfnl_unlock(subsys_id);
 	kfree_skb(nskb);
 }
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 28fb8f3..8892b7b 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -180,15 +180,17 @@ static int nft_hash_init(const struct nft_set *set,
 static void nft_hash_destroy(const struct nft_set *set)
 {
 	const struct rhashtable *priv = nft_set_priv(set);
-	const struct bucket_table *tbl;
+	const struct bucket_table *tbl = priv->tbl;
 	struct nft_hash_elem *he, *next;
 	unsigned int i;
 
-	tbl = rht_dereference(priv->tbl, priv);
-	for (i = 0; i < tbl->size; i++)
-		rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
+	for (i = 0; i < tbl->size; i++) {
+		for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
+		     he != NULL; he = next) {
+			next = rht_entry(he->node.next, struct nft_hash_elem, node);
 			nft_hash_elem_destroy(set, he);
-
+		}
+	}
 	rhashtable_destroy(priv);
 }
 
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index e1836ff..46214f2 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -234,13 +234,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
 	struct nft_rbtree_elem *rbe;
 	struct rb_node *node;
 
-	spin_lock_bh(&nft_rbtree_lock);
 	while ((node = priv->root.rb_node) != NULL) {
 		rb_erase(node, &priv->root);
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 		nft_rbtree_elem_destroy(set, rbe);
 	}
-	spin_unlock_bh(&nft_rbtree_lock);
 }
 
 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,