Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c                     22
-rw-r--r--  net/decnet/dn_neigh.c             2
-rw-r--r--  net/ipv4/icmp.c                  12
-rw-r--r--  net/ipv4/ipcomp.c                 2
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c     17
-rw-r--r--  net/ipv4/netfilter/ipt_TCPMSS.c   7
-rw-r--r--  net/ipv4/tcp_output.c            39
-rw-r--r--  net/ipv6/ip6_input.c              9
-rw-r--r--  net/ipv6/ipcomp6.c                2
-rw-r--r--  net/ipv6/raw.c                    2
-rw-r--r--  net/sunrpc/xdr.c                  1
11 files changed, 62 insertions, 53 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index a755e88..1bb7dc1 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -251,10 +251,11 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
unsigned int hash;
struct rif_cache *entry;
unsigned char *olddata;
+ unsigned long flags;
static const unsigned char mcast_func_addr[]
= {0xC0,0x00,0x00,0x04,0x00,0x00};
- spin_lock_bh(&rif_lock);
+ spin_lock_irqsave(&rif_lock, flags);
/*
* Broadcasts are single route as stated in RFC 1042
@@ -323,7 +324,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
else
slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
olddata = skb->data;
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
skb_pull(skb, slack);
memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
@@ -337,10 +338,11 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
unsigned int hash, rii_p = 0;
+ unsigned long flags;
struct rif_cache *entry;
- spin_lock_bh(&rif_lock);
+ spin_lock_irqsave(&rif_lock, flags);
/*
* Firstly see if the entry exists
@@ -378,7 +380,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
if(!entry)
{
printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
return;
}
@@ -420,7 +422,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
}
entry->last_used=jiffies;
}
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
}
/*
@@ -430,9 +432,9 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
static void rif_check_expire(unsigned long dummy)
{
int i;
- unsigned long next_interval = jiffies + sysctl_tr_rif_timeout/2;
+ unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
- spin_lock_bh(&rif_lock);
+ spin_lock_irqsave(&rif_lock, flags);
for(i =0; i < RIF_TABLE_SIZE; i++) {
struct rif_cache *entry, **pentry;
@@ -454,7 +456,7 @@ static void rif_check_expire(unsigned long dummy)
}
}
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irqrestore(&rif_lock, flags);
mod_timer(&rif_timer, next_interval);
@@ -485,7 +487,7 @@ static struct rif_cache *rif_get_idx(loff_t pos)
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
- spin_lock_bh(&rif_lock);
+ spin_lock_irq(&rif_lock);
return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
@@ -516,7 +518,7 @@ static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void rif_seq_stop(struct seq_file *seq, void *v)
{
- spin_unlock_bh(&rif_lock);
+ spin_unlock_irq(&rif_lock);
}
static int rif_seq_show(struct seq_file *seq, void *v)
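Note (not part of the patch): the tr.c hunks above convert the RIF cache from BH-only locking (spin_lock_bh) to IRQ-safe locking, so the cache can also be reached safely from hard-interrupt context; the seq_file iterator, which always runs in process context, takes the plain spin_lock_irq()/spin_unlock_irq() pair instead. A minimal sketch of the spin_lock_irqsave() pattern, with a hypothetical lock standing in for rif_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical; stands in for rif_lock */

static void demo_touch_cache(void)
{
        unsigned long flags;

        /* Saves the current interrupt state and disables local IRQs, so the
         * critical section cannot be re-entered from hard-interrupt context
         * on this CPU. */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... read or update the shared cache here ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}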
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index f32dba9..8d0cc3c 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -148,12 +148,12 @@ static int dn_neigh_construct(struct neighbour *neigh)
__neigh_parms_put(neigh->parms);
neigh->parms = neigh_parms_clone(parms);
- rcu_read_unlock();
if (dn_db->use_long)
neigh->ops = &dn_long_ops;
else
neigh->ops = &dn_short_ops;
+ rcu_read_unlock();
if (dn->flags & DN_NDFLAG_P3)
neigh->ops = &dn_phase3_ops;
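Note (not part of the patch): the dn_neigh.c hunk only moves rcu_read_unlock() down so that dn_db is no longer dereferenced after the RCU read-side section has ended. A hedged sketch of that rule, with illustrative names:

#include <linux/rcupdate.h>

struct demo_cfg {
        int use_long;                   /* illustrative field, like dn_db->use_long */
};

static int demo_read(struct demo_cfg **slot)
{
        int use_long;

        rcu_read_lock();
        /* Dereference the protected pointer while the read section is open... */
        use_long = rcu_dereference(*slot)->use_long;
        /* ...and only close the section once nothing needs the pointer anymore. */
        rcu_read_unlock();

        return use_long;
}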
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 3d78464..badfc58 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
{
struct sk_buff *skb;
- ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
- icmp_param->data_len+icmp_param->head_len,
- icmp_param->head_len,
- ipc, rt, MSG_DONTWAIT);
-
- if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
+ if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
+ icmp_param->data_len+icmp_param->head_len,
+ icmp_param->head_len,
+ ipc, rt, MSG_DONTWAIT) < 0)
+ ip_flush_pending_frames(icmp_socket->sk);
+ else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = skb->h.icmph;
unsigned int csum = 0;
struct sk_buff *skb1;
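Note (not part of the patch): the icmp.c change makes icmp_push_reply() check the return value of ip_append_data() and flush any partially queued frames when it fails, instead of blindly peeking the write queue. A self-contained sketch of that error-path shape, with made-up helpers standing in for the real socket plumbing:

#include <stdio.h>

/* append_reply(), flush_pending() and send_pending() are hypothetical
 * stand-ins for ip_append_data(), ip_flush_pending_frames() and the
 * checksum-and-send path; only the control flow mirrors the patch. */
static int  append_reply(void)  { return -1; /* pretend the append failed */ }
static void flush_pending(void) { puts("discarding partially built reply"); }
static void send_pending(void)  { puts("checksumming and sending reply"); }

int main(void)
{
        if (append_reply() < 0)
                flush_pending();
        else
                send_pending();
        return 0;
}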
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 2065944..7ded6e6 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -358,7 +358,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
list_for_each_entry(pos, &ipcomp_tfms_list, list) {
struct crypto_tfm *tfm;
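Note (not part of the patch): both ipcomp changes (this one and the matching hunk in ipcomp6.c below) switch to raw_smp_processor_id() because the caller runs in preemptible context and only needs some valid CPU number; the checked smp_processor_id() would trip the debug-preempt warning there. A hedged sketch of the distinction:

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_counter);       /* hypothetical per-CPU data */

static int demo_any_cpu(void)
{
        /* Skips the "may we be migrated?" debug check: any valid CPU ID is
         * fine here, even if the task moves right after reading it. */
        return raw_smp_processor_id();
}

static void demo_this_cpu(void)
{
        /* smp_processor_id() is for code that must stay on the current CPU,
         * i.e. while preemption is disabled. */
        preempt_disable();
        per_cpu(demo_counter, smp_processor_id())++;
        preempt_enable();
}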
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index ada9911..94a0ce1 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -61,16 +61,20 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo, int inward)
if (!tcph)
return 0;
- if (!(einfo->operation & IPT_ECN_OP_SET_ECE
- || tcph->ece == einfo->proto.tcp.ece)
- && (!(einfo->operation & IPT_ECN_OP_SET_CWR
- || tcph->cwr == einfo->proto.tcp.cwr)))
+ if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) ||
+ tcph->ece == einfo->proto.tcp.ece) &&
+ ((!(einfo->operation & IPT_ECN_OP_SET_CWR) ||
+ tcph->cwr == einfo->proto.tcp.cwr)))
return 1;
if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
return 0;
tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4;
+ if ((*pskb)->ip_summed == CHECKSUM_HW &&
+ skb_checksum_help(*pskb, inward))
+ return 0;
+
diffs[0] = ((u_int16_t *)tcph)[6];
if (einfo->operation & IPT_ECN_OP_SET_ECE)
tcph->ece = einfo->proto.tcp.ece;
@@ -79,13 +83,10 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo, int inward)
diffs[1] = ((u_int16_t *)tcph)[6];
diffs[0] = diffs[0] ^ 0xFFFF;
- if ((*pskb)->ip_summed != CHECKSUM_HW)
+ if ((*pskb)->ip_summed != CHECKSUM_UNNECESSARY)
tcph->check = csum_fold(csum_partial((char *)diffs,
sizeof(diffs),
tcph->check^0xFFFF));
- else
- if (skb_checksum_help(*pskb, inward))
- return 0;
(*pskb)->nfcache |= NFC_ALTERED;
return 1;
}
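Note (not part of the patch): the ipt_ECN.c hunks (and the ipt_TCPMSS.c ones below) resolve a hardware checksum with skb_checksum_help() before the packet is modified, rather than trying to repair it afterwards, and then apply the incremental software fix-up only when one is still needed. Roughly the ordering, as a hedged sketch with a hypothetical edit step:

#include <linux/skbuff.h>

/* Hypothetical stand-in for the header rewrite done by the real target. */
static void demo_rewrite_bits(struct sk_buff *skb)
{
}

static int demo_mangle(struct sk_buff **pskb, int inward)
{
        /* Finish the hardware checksum first; if that fails there is
         * nothing safe left to modify. */
        if ((*pskb)->ip_summed == CHECKSUM_HW &&
            skb_checksum_help(*pskb, inward))
                return 0;

        demo_rewrite_bits(*pskb);       /* now the packet may be edited */
        return 1;
}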
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
index 1049050..7b84a25 100644
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ b/net/ipv4/netfilter/ipt_TCPMSS.c
@@ -61,6 +61,10 @@ ipt_tcpmss_target(struct sk_buff **pskb,
if (!skb_ip_make_writable(pskb, (*pskb)->len))
return NF_DROP;
+ if ((*pskb)->ip_summed == CHECKSUM_HW &&
+ skb_checksum_help(*pskb, out == NULL))
+ return NF_DROP;
+
iph = (*pskb)->nh.iph;
tcplen = (*pskb)->len - iph->ihl*4;
@@ -186,9 +190,6 @@ ipt_tcpmss_target(struct sk_buff **pskb,
newmss);
retmodified:
- /* We never hw checksum SYN packets. */
- BUG_ON((*pskb)->ip_summed == CHECKSUM_HW);
-
(*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED;
return IPT_CONTINUE;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3ed6fc1..566045e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -861,7 +861,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
u16 flags;
/* All of a TSO frame must be composed of paged data. */
- BUG_ON(skb->len != skb->data_len);
+ if (skb->len != skb->data_len)
+ return tcp_fragment(sk, skb, len, mss_now);
buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
if (unlikely(buff == NULL))
@@ -974,6 +975,8 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
sent_pkts = 0;
while ((skb = sk->sk_send_head)) {
+ unsigned int limit;
+
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
@@ -994,9 +997,10 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
break;
}
+ limit = mss_now;
if (tso_segs > 1) {
- u32 limit = tcp_window_allows(tp, skb,
- mss_now, cwnd_quota);
+ limit = tcp_window_allows(tp, skb,
+ mss_now, cwnd_quota);
if (skb->len < limit) {
unsigned int trim = skb->len % mss_now;
@@ -1004,15 +1008,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
if (trim)
limit = skb->len - trim;
}
- if (skb->len > limit) {
- if (tso_fragment(sk, skb, limit, mss_now))
- break;
- }
- } else if (unlikely(skb->len > mss_now)) {
- if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
- break;
}
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ break;
+
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
@@ -1064,11 +1065,14 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
if (likely(cwnd_quota)) {
+ unsigned int limit;
+
BUG_ON(!tso_segs);
+ limit = mss_now;
if (tso_segs > 1) {
- u32 limit = tcp_window_allows(tp, skb,
- mss_now, cwnd_quota);
+ limit = tcp_window_allows(tp, skb,
+ mss_now, cwnd_quota);
if (skb->len < limit) {
unsigned int trim = skb->len % mss_now;
@@ -1076,15 +1080,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
if (trim)
limit = skb->len - trim;
}
- if (skb->len > limit) {
- if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
- return;
- }
- } else if (unlikely(skb->len > mss_now)) {
- if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
- return;
}
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ return;
+
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
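Note (not part of the patch): taken together, the tcp_output.c hunks let tso_fragment() fall back to tcp_fragment() when the skb still carries linear data, and fold the TSO and non-TSO paths into one fragmentation check: start from a one-MSS limit, widen it only for multi-segment skbs, then split once if the skb still exceeds it. The resulting control flow inside the transmit loop, restated as a sketch (identifiers as in the patch):

unsigned int limit = mss_now;

if (tso_segs > 1) {
        limit = tcp_window_allows(tp, skb, mss_now, cwnd_quota);
        if (skb->len < limit) {
                unsigned int trim = skb->len % mss_now;
                if (trim)
                        limit = skb->len - trim;        /* keep whole segments only */
        }
}

if (skb->len > limit &&
    unlikely(tso_fragment(sk, skb, limit, mss_now)))
        break;                                          /* could not split; stop sending for now */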
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 866f107..10fbb50 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -198,12 +198,13 @@ resubmit:
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
- icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
+ icmpv6_send(skb, ICMPV6_PARAMPROB,
+ ICMPV6_UNK_NEXTHDR, nhoff,
+ skb->dev);
}
- } else {
+ } else
IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
- kfree_skb(skb);
- }
+ kfree_skb(skb);
}
rcu_read_unlock();
return 0;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 423feb4..135383e 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -354,7 +354,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
struct crypto_tfm *tfm;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e2b848e..1d4d75b 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -328,6 +328,8 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (skb->ip_summed == CHECKSUM_HW) {
+ skb_postpull_rcsum(skb, skb->nh.raw,
+ skb->h.raw - skb->nh.raw);
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
&skb->nh.ipv6h->daddr,
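Note (not part of the patch): the raw.c hunk adds skb_postpull_rcsum() so that a hardware-computed checksum, which still covers the already-pulled IPv6 header bytes, is adjusted before csum_ipv6_magic() validates the payload. A hedged sketch of the helper's job, with an illustrative wrapper:

#include <linux/skbuff.h>

/* After bytes have been pulled off the front of a CHECKSUM_HW skb, skb->csum
 * still includes them; subtract their contribution before trusting the sum. */
static void demo_fixup_after_pull(struct sk_buff *skb,
                                  const void *pulled_start,
                                  unsigned int pulled_len)
{
        if (skb->ip_summed == CHECKSUM_HW)
                skb_postpull_rcsum(skb, pulled_start, pulled_len);
}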
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 8a4d9c1..fde16f4 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -993,6 +993,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
return -EINVAL;
} else {
if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
+ desc->array_len > desc->array_maxlen ||
(unsigned long) base + 4 + desc->array_len *
desc->elem_size > buf->len)
return -EINVAL;
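Note (not part of the patch): the xdr.c hunk adds a bounds check so a wire-supplied array length can never exceed the caller's declared maximum before it feeds the size arithmetic on the following line. A small self-contained sketch of that style of validation (plain C, not the SunRPC code itself):

#include <stddef.h>
#include <stdint.h>

/* Reject an element count taken off the wire before it is used in size
 * arithmetic, so an oversized value can never drive a later copy. */
static int demo_check_array_len(uint32_t wire_len, uint32_t max_len,
                                size_t elem_size, size_t bytes_remaining)
{
        if (wire_len > max_len)
                return -1;              /* larger than the caller allows */
        if ((uint64_t)wire_len * elem_size > bytes_remaining)
                return -1;              /* would run past the buffer */
        return 0;
}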