author		David S. Miller <davem@davemloft.net>	2011-09-22 03:23:13 -0400
committer	David S. Miller <davem@davemloft.net>	2011-09-22 03:23:13 -0400
commit		8decf868790b48a727d7e7ca164f2bcd3c1389c0 (patch)
tree		b759a5f861f842af7ea76f9011b579d06e9d5508 /net/core
parent		3fc72370186be2f9d4d6ef06d99e1caa5d92c564 (diff)
parent		d93dc5c4478c1fd5de85a3e8aece9aad7bbae044 (diff)
Merge branch 'master' of github.com:davem330/net
Conflicts:
	MAINTAINERS
	drivers/net/Kconfig
	drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
	drivers/net/ethernet/broadcom/tg3.c
	drivers/net/wireless/iwlwifi/iwl-pci.c
	drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
	drivers/net/wireless/rt2x00/rt2800usb.c
	drivers/net/wireless/wl12xx/main.c
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c		 8
-rw-r--r--	net/core/fib_rules.c	 4
-rw-r--r--	net/core/flow.c		36
-rw-r--r--	net/core/neighbour.c	 8
-rw-r--r--	net/core/netpoll.c	 4
-rw-r--r--	net/core/skbuff.c	22
6 files changed, 57 insertions(+), 25 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4b9981c..bf49a47 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1517,6 +1517,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
*/
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+ if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+ }
+
skb_orphan(skb);
nf_reset(skb);
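
For context, dev_forward_skb() is the helper that virtual drivers such as veth use to hand a frame to a peer device's receive path. A rough, assumed caller sketch (kernel style, not part of this patch; example_xmit and example_get_peer are invented names) shows why the zerocopy detach belongs inside the helper rather than in every driver:

/* Assumed sketch of a veth-like transmit routine, not from this patch. */
static struct net_device *example_get_peer(struct net_device *dev); /* hypothetical lookup */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);

	/* dev_forward_skb() now copies SKBTX_DEV_ZEROCOPY frags itself, so the
	 * sender's userspace pages never reach the peer's receive path. */
	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}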
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 67c5c28..38be474 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
*/
list_for_each_entry(r, &ops->rules_list, list) {
if (r->action == FR_ACT_GOTO &&
- r->target == rule->pref) {
- BUG_ON(rtnl_dereference(r->ctarget) != NULL);
+ r->target == rule->pref &&
+ rtnl_dereference(r->ctarget) == NULL) {
rcu_assign_pointer(r->ctarget, rule);
if (--ops->unresolved_rules == 0)
break;
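
The fib_rules change folds the old BUG_ON into the match condition: several rules may share a preference, so a GOTO rule aimed at that preference can already be resolved by the time another rule with the same pref is inserted, and only still-unresolved entries should be bound. A stand-alone user-space model of that loop (illustrative only; struct rule and resolve_gotos are invented names):

#include <stdio.h>
#include <stddef.h>

struct rule {
	int pref;
	int goto_target;		/* 0 = not a goto rule */
	struct rule *ctarget;		/* resolved goto destination, or NULL */
};

static void resolve_gotos(struct rule *rules, size_t n, struct rule *new_rule)
{
	for (size_t i = 0; i < n; i++) {
		/* The fix: bind only entries whose target is still unresolved. */
		if (rules[i].goto_target == new_rule->pref && rules[i].ctarget == NULL)
			rules[i].ctarget = new_rule;
	}
}

int main(void)
{
	struct rule table[1] = { { .pref = 10, .goto_target = 20 } };
	struct rule first20  = { .pref = 20 };
	struct rule second20 = { .pref = 20 };

	resolve_gotos(table, 1, &first20);	/* binds the goto rule */
	resolve_gotos(table, 1, &second20);	/* the old code would assert here */

	printf("goto rule bound to the %s pref-20 rule\n",
	       table[0].ctarget == &first20 ? "first" : "second");
	return 0;
}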
diff --git a/net/core/flow.c b/net/core/flow.c
index bf32c33..555a456 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -30,6 +30,7 @@ struct flow_cache_entry {
struct hlist_node hlist;
struct list_head gc_list;
} u;
+ struct net *net;
u16 family;
u8 dir;
u32 genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
static u32 flow_hash_code(struct flow_cache *fc,
struct flow_cache_percpu *fcp,
- const struct flowi *key)
+ const struct flowi *key,
+ size_t keysize)
{
const u32 *k = (const u32 *) key;
+ const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
- return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+ return jhash2(k, length, fcp->hash_rnd)
& (flow_cache_hash_size(fc) - 1);
}
-typedef unsigned long flow_compare_t;
-
/* I hear what you're saying, use memcmp. But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
*/
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+ size_t keysize)
{
const flow_compare_t *k1, *k1_lim, *k2;
- const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
- BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
k1 = (const flow_compare_t *) key1;
- k1_lim = k1 + n_elem;
+ k1_lim = k1 + keysize;
k2 = (const flow_compare_t *) key2;
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
struct flow_cache_entry *fle, *tfle;
struct hlist_node *entry;
struct flow_cache_object *flo;
+ size_t keysize;
unsigned int hash;
local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
fle = NULL;
flo = NULL;
+
+ keysize = flow_key_size(family);
+ if (!keysize)
+ goto nocache;
+
/* Packet really early in init? Making flow_cache_init a
* pre-smp initcall would solve this. --RR */
if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
if (fcp->hash_rnd_recalc)
flow_new_hash_rnd(fc, fcp);
- hash = flow_hash_code(fc, fcp, key);
+ hash = flow_hash_code(fc, fcp, key, keysize);
hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
- if (tfle->family == family &&
+ if (tfle->net == net &&
+ tfle->family == family &&
tfle->dir == dir &&
- flow_key_compare(key, &tfle->key) == 0) {
+ flow_key_compare(key, &tfle->key, keysize) == 0) {
fle = tfle;
break;
}
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
if (fle) {
+ fle->net = net;
fle->family = family;
fle->dir = dir;
- memcpy(&fle->key, key, sizeof(*key));
+ memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
fle->object = NULL;
hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
fcp->hash_count++;
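
The flow cache now hashes and compares only the per-family significant part of the key, and also matches on the owning namespace. A stand-alone user-space sketch of the word-sized compare and the jhash2 length arithmetic (flow_compare_t is modelled as unsigned long, matching the typedef this hunk drops from flow.c; key_compare is an invented name):

#include <stdio.h>
#include <stddef.h>

typedef unsigned long flow_compare_t;

/* Compare the first `keysize` machine words of two keys, as
 * flow_key_compare() now does instead of covering all of struct flowi. */
static int key_compare(const void *key1, const void *key2, size_t keysize)
{
	const flow_compare_t *k1 = key1;
	const flow_compare_t *k1_lim = k1 + keysize;
	const flow_compare_t *k2 = key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);
	return 0;
}

int main(void)
{
	unsigned long a[4] = { 1, 2, 3, 99 };
	unsigned long b[4] = { 1, 2, 3, 42 };
	size_t keysize = 3;	/* pretend only the first 3 words are significant */

	/* Matches, even though word 3 differs: it is outside the key size. */
	printf("compare over 3 words: %d (0 = match)\n", key_compare(a, b, keysize));

	/* The length handed to jhash2(): keysize words re-expressed in u32 units. */
	printf("jhash2 length: %zu u32 words\n",
	       keysize * sizeof(flow_compare_t) / sizeof(unsigned int));
	return 0;
}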
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 4002261..4344964 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1331,11 +1331,15 @@ static void neigh_proxy_process(unsigned long arg)
if (tdif <= 0) {
struct net_device *dev = skb->dev;
+
__skb_unlink(skb, &tbl->proxy_queue);
- if (tbl->proxy_redo && netif_running(dev))
+ if (tbl->proxy_redo && netif_running(dev)) {
+ rcu_read_lock();
tbl->proxy_redo(skb);
- else
+ rcu_read_unlock();
+ } else {
kfree_skb(skb);
+ }
dev_put(dev);
} else if (!sched_next || tdif < sched_next)
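
The neighbour change runs the proxy redo callback inside an RCU read-side critical section. A hedged kernel-style sketch (assumed, not from this patch; example_proxy_redo is an invented name) of the kind of dereference such a callback ends up making, which is what requires rcu_read_lock() in the caller:

/* Assumed sketch: a redo callback touching RCU-protected per-device state. */
static void example_proxy_redo(struct sk_buff *skb)
{
	struct in_device *in_dev = rcu_dereference(skb->dev->ip_ptr);

	if (!in_dev)
		return;
	/* ... re-run protocol processing against RCU-protected device state ... */
}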
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d676a56..f57d946 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -558,13 +558,14 @@ int __netpoll_rx(struct sk_buff *skb)
if (skb_shared(skb))
goto out;
- iph = (struct iphdr *)skb->data;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
+ iph = (struct iphdr *)skb->data;
if (iph->ihl < 5 || iph->version != 4)
goto out;
if (!pskb_may_pull(skb, iph->ihl*4))
goto out;
+ iph = (struct iphdr *)skb->data;
if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
goto out;
@@ -579,6 +580,7 @@ int __netpoll_rx(struct sk_buff *skb)
if (pskb_trim_rcsum(skb, len))
goto out;
+ iph = (struct iphdr *)skb->data;
if (iph->protocol != IPPROTO_UDP)
goto out;
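
The netpoll hunks re-read iph from skb->data after every pskb_may_pull() and after pskb_trim_rcsum(), because those calls may reallocate the skb head and leave earlier pointers dangling. A stand-alone user-space analogue of the same rule, using realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char *hdr;
	char *bigger;

	if (!buf)
		return 1;
	strcpy(buf, "header");

	hdr = buf;			/* pointer derived from the old buffer */

	bigger = realloc(buf, 1 << 20);	/* may move the block, like a pull that
					 * reallocates the skb head */
	if (!bigger) {
		free(buf);
		return 1;
	}
	buf = bigger;

	/* hdr may now dangle; re-derive it from the current buffer, just as
	 * the patch re-reads iph from skb->data after each pull/trim. */
	hdr = buf;
	printf("%s\n", hdr);

	free(buf);
	return 0;
}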
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 296afd0..5b2c5f1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -613,8 +613,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
}
EXPORT_SYMBOL_GPL(skb_morph);
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+ * @skb: the skb to modify
+ * @gfp_mask: allocation priority
+ *
+ * This must be called on SKBTX_DEV_ZEROCOPY skb.
+ * It will copy all frags into kernel and drop the reference
+ * to userspace pages.
+ *
+ * If this function is called from an interrupt gfp_mask() must be
+ * %GFP_ATOMIC.
+ *
+ * Returns 0 on success or a negative error code on failure
+ * to allocate kernel memory to copy to.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
int i;
int num_frags = skb_shinfo(skb)->nr_frags;
@@ -654,6 +667,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
skb_shinfo(skb)->frags[i - 1].page = head;
head = (struct page *)head->private;
}
+
+ skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
return 0;
}
@@ -679,7 +694,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
return NULL;
- skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
n = skb + 1;
@@ -805,7 +819,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
n = NULL;
goto out;
}
- skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -898,7 +911,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
goto nofrags;
- skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
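
skb_copy_ubufs() is now non-static and clears SKBTX_DEV_ZEROCOPY itself, which is why the call sites above drop their own flag clearing. A minimal kernel-style sketch (assumed, not part of this patch; example_detach_userspace_frags is an invented name) of the resulting caller pattern:

/* Assumed sketch of a caller that must own the frags before touching them. */
static int example_detach_userspace_frags(struct sk_buff *skb)
{
	int err;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))
		return 0;

	err = skb_copy_ubufs(skb, GFP_ATOMIC);
	if (err)
		return err;	/* frags still reference userspace pages */

	/* SKBTX_DEV_ZEROCOPY is cleared inside skb_copy_ubufs() now, so
	 * callers no longer clear it themselves. */
	return 0;
}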