path: root/net/core
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c                95
-rw-r--r--  net/core/dst.c                15
-rw-r--r--  net/core/ethtool.c           224
-rw-r--r--  net/core/filter.c            709
-rw-r--r--  net/core/flow.c                8
-rw-r--r--  net/core/net-sysfs.c           2
-rw-r--r--  net/core/net_namespace.c       2
-rw-r--r--  net/core/netclassid_cgroup.c  15
-rw-r--r--  net/core/netprio_cgroup.c     41
-rw-r--r--  net/core/pktgen.c             60
-rw-r--r--  net/core/ptp_classifier.c      2
-rw-r--r--  net/core/rtnetlink.c          53
-rw-r--r--  net/core/skbuff.c             20
-rw-r--r--  net/core/sock.c               53
-rw-r--r--  net/core/sock_diag.c           4
15 files changed, 773 insertions, 530 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 48d81e4..867adb2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1661,6 +1661,29 @@ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
+int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+ if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+ }
+
+ if (unlikely(!is_skb_forwardable(dev, skb))) {
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+
+ skb_scrub_packet(skb, true);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__dev_forward_skb);
+
/**
* dev_forward_skb - loopback an skb to another netif
*
@@ -1681,24 +1704,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
*/
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
- atomic_long_inc(&dev->rx_dropped);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
- }
-
- if (unlikely(!is_skb_forwardable(dev, skb))) {
- atomic_long_inc(&dev->rx_dropped);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-
- skb_scrub_packet(skb, true);
- skb->protocol = eth_type_trans(skb, dev);
-
- return netif_rx_internal(skb);
+ return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -2284,7 +2290,7 @@ EXPORT_SYMBOL(skb_checksum_help);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
__be16 type = skb->protocol;
- int vlan_depth = ETH_HLEN;
+ int vlan_depth = skb->mac_len;
/* Tunnel gso handlers can set protocol to ethernet. */
if (type == htons(ETH_P_TEB)) {
@@ -2418,7 +2424,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
* 2. No high memory really exists on this machine.
*/
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
@@ -2493,38 +2499,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
}
static netdev_features_t harmonize_features(struct sk_buff *skb,
- const struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
int tmp;
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
features &= ~NETIF_F_ALL_CSUM;
- } else if (illegal_highdma(dev, skb)) {
+ } else if (illegal_highdma(skb->dev, skb)) {
features &= ~NETIF_F_SG;
}
return features;
}
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
- const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
- netdev_features_t features = dev->features;
+ netdev_features_t features = skb->dev->features;
- if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+ if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
features &= ~NETIF_F_GSO_MASK;
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
} else if (!vlan_tx_tag_present(skb)) {
- return harmonize_features(skb, dev, features);
+ return harmonize_features(skb, features);
}
- features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+ features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2536,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- return harmonize_features(skb, dev, features);
+ return harmonize_features(skb, features);
}
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
@@ -3439,7 +3443,7 @@ out:
* @rx_handler: receive handler to register
* @rx_handler_data: data pointer that is used by rx handler
*
- * Register a receive hander for a device. This handler will then be
+ * Register a receive handler for a device. This handler will then be
* called from __netif_receive_skb. A negative errno code is returned
* on a failure.
*
@@ -4043,6 +4047,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
napi->skb = skb;
}
@@ -4155,8 +4160,8 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
struct softnet_data *next = remsd->rps_ipi_next;
if (cpu_online(remsd->cpu))
- __smp_call_function_single(remsd->cpu,
- &remsd->csd, 0);
+ smp_call_function_single_async(remsd->cpu,
+ &remsd->csd);
remsd = next;
}
} else
@@ -4588,8 +4593,7 @@ void *netdev_lower_get_next_private(struct net_device *dev,
if (&lower->list == &dev->adj_list.lower)
return NULL;
- if (iter)
- *iter = lower->list.next;
+ *iter = lower->list.next;
return lower->private;
}
@@ -4617,8 +4621,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
if (&lower->list == &dev->adj_list.lower)
return NULL;
- if (iter)
- *iter = &lower->list;
+ *iter = &lower->list;
return lower->private;
}
@@ -5239,6 +5242,7 @@ void __dev_set_rx_mode(struct net_device *dev)
if (ops->ndo_set_rx_mode)
ops->ndo_set_rx_mode(dev);
}
+EXPORT_SYMBOL(__dev_set_rx_mode);
void dev_set_rx_mode(struct net_device *dev)
{
@@ -5600,10 +5604,6 @@ static void rollback_registered_many(struct list_head *head)
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
- if (!dev->rtnl_link_ops ||
- dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
- rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
-
/*
* Flush the unicast and multicast chains
*/
@@ -5613,6 +5613,10 @@ static void rollback_registered_many(struct list_head *head)
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
+ if (!dev->rtnl_link_ops ||
+ dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
+
/* Notifier chain MUST detach us all upper devices. */
WARN_ON(netdev_has_any_upper_dev(dev));
@@ -5696,6 +5700,13 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
}
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (dev->netdev_ops->ndo_busy_poll)
+ features |= NETIF_F_BUSY_POLL;
+ else
+#endif
+ features &= ~NETIF_F_BUSY_POLL;
+
return features;
}
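The one-line dev_forward_skb() body above uses GCC's conditional with an omitted middle operand: a ?: b evaluates a once and yields it if nonzero, otherwise yields b. A minimal standalone sketch of the same short-circuit shape (the function names here are illustrative, not kernel APIs):

#include <stdio.h>

/* Hypothetical split: validate/scrub first, then inject the packet. */
static int prepare(int ok) { return ok ? 0 : -1; } /* 0 on success */
static int inject(void)    { return 42; }          /* delivery result */

static int forward(int ok)
{
	/* GNU extension: returns prepare()'s nonzero error as-is,
	 * otherwise falls through to inject() -- mirroring
	 * __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb). */
	return prepare(ok) ?: inject();
}

int main(void)
{
	printf("%d %d\n", forward(1), forward(0)); /* prints: 42 -1 */
	return 0;
}

So a nonzero NET_RX_DROP from the new __dev_forward_skb() helper is returned directly, and only a clean 0 falls through to netif_rx_internal().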
diff --git a/net/core/dst.c b/net/core/dst.c
index ca4231e..80d6286 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -142,12 +142,12 @@ loop:
mutex_unlock(&dst_gc_mutex);
}
-int dst_discard(struct sk_buff *skb)
+int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
{
kfree_skb(skb);
return 0;
}
-EXPORT_SYMBOL(dst_discard);
+EXPORT_SYMBOL(dst_discard_sk);
const u32 dst_default_metrics[RTAX_MAX + 1] = {
/* This initializer is needed to force linker to place this variable
@@ -184,7 +184,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst->xfrm = NULL;
#endif
dst->input = dst_discard;
- dst->output = dst_discard;
+ dst->output = dst_discard_sk;
dst->error = 0;
dst->obsolete = initial_obsolete;
dst->header_len = 0;
@@ -209,8 +209,10 @@ static void ___dst_free(struct dst_entry *dst)
/* The first case (dev==NULL) is required, when
protocol module is unloaded.
*/
- if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
- dst->input = dst->output = dst_discard;
+ if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
+ dst->input = dst_discard;
+ dst->output = dst_discard_sk;
+ }
dst->obsolete = DST_OBSOLETE_DEAD;
}
@@ -361,7 +363,8 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
return;
if (!unregister) {
- dst->input = dst->output = dst_discard;
+ dst->input = dst_discard;
+ dst->output = dst_discard_sk;
} else {
dst->dev = dev_net(dst->dev)->loopback_dev;
dev_hold(dst->dev);
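The dst.c churn above is mechanical: the output callback now carries the socket as well as the skb, so a single dst_discard() can no longer be assigned to both slots. A simplified sketch of the resulting split, with a stand-in struct rather than the real struct dst_entry:

struct sk_buff;
struct sock;

/* Stand-in for the two struct dst_entry hooks; types simplified. */
struct dst_hooks {
	int (*input)(struct sk_buff *skb);                   /* rx path */
	int (*output)(struct sock *sk, struct sk_buff *skb); /* tx path, sk added */
};

static int discard(struct sk_buff *skb)
{
	return 0; /* the real dst_discard() also frees the skb */
}

static int discard_sk(struct sock *sk, struct sk_buff *skb)
{
	(void)sk;
	return discard(skb); /* same behavior, output-compatible signature */
}

static void mark_dead(struct dst_hooks *h)
{
	/* One shared discard function no longer type-checks for both
	 * slots, hence every "dst->input = dst->output = dst_discard"
	 * site above becomes two assignments. */
	h->input  = discard;
	h->output = discard_sk;
}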
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 30071de..aa8978ac 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -97,6 +97,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_RXFCS_BIT] = "rx-fcs",
[NETIF_F_RXALL_BIT] = "rx-all",
[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
+ [NETIF_F_BUSY_POLL_BIT] = "busy-poll",
};
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
@@ -556,6 +557,25 @@ err_out:
return ret;
}
+static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
+ struct ethtool_rxnfc *rx_rings,
+ u32 size)
+{
+ int ret = 0, i;
+
+ if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
+ ret = -EFAULT;
+
+ /* Validate ring indices */
+ for (i = 0; i < size; i++) {
+ if (indir[i] >= rx_rings->data) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ return ret;
+}
+
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
@@ -612,6 +632,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
u32 *indir;
const struct ethtool_ops *ops = dev->ethtool_ops;
int ret;
+ u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir ||
!ops->get_rxnfc)
@@ -642,28 +663,196 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
for (i = 0; i < dev_size; i++)
indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
} else {
- if (copy_from_user(indir,
- useraddr +
- offsetof(struct ethtool_rxfh_indir,
- ring_index[0]),
- dev_size * sizeof(indir[0]))) {
+ ret = ethtool_copy_validate_indir(indir,
+ useraddr + ringidx_offset,
+ &rx_rings,
+ dev_size);
+ if (ret)
+ goto out;
+ }
+
+ ret = ops->set_rxfh_indir(dev, indir);
+
+out:
+ kfree(indir);
+ return ret;
+}
+
+static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
+ void __user *useraddr)
+{
+ int ret;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u32 user_indir_size = 0, user_key_size = 0;
+ u32 dev_indir_size = 0, dev_key_size = 0;
+ u32 total_size;
+ u32 indir_offset, indir_bytes;
+ u32 key_offset;
+ u32 *indir = NULL;
+ u8 *hkey = NULL;
+ u8 *rss_config;
+
+ if (!(dev->ethtool_ops->get_rxfh_indir_size ||
+ dev->ethtool_ops->get_rxfh_key_size) ||
+ !dev->ethtool_ops->get_rxfh)
+ return -EOPNOTSUPP;
+
+ if (ops->get_rxfh_indir_size)
+ dev_indir_size = ops->get_rxfh_indir_size(dev);
+
+ indir_offset = offsetof(struct ethtool_rxfh, indir_size);
+
+ if (copy_from_user(&user_indir_size,
+ useraddr + indir_offset,
+ sizeof(user_indir_size)))
+ return -EFAULT;
+
+ if (copy_to_user(useraddr + indir_offset,
+ &dev_indir_size, sizeof(dev_indir_size)))
+ return -EFAULT;
+
+ if (ops->get_rxfh_key_size)
+ dev_key_size = ops->get_rxfh_key_size(dev);
+
+ if ((dev_key_size + dev_indir_size) == 0)
+ return -EOPNOTSUPP;
+
+ key_offset = offsetof(struct ethtool_rxfh, key_size);
+
+ if (copy_from_user(&user_key_size,
+ useraddr + key_offset,
+ sizeof(user_key_size)))
+ return -EFAULT;
+
+ if (copy_to_user(useraddr + key_offset,
+ &dev_key_size, sizeof(dev_key_size)))
+ return -EFAULT;
+
+ /* If the user buffer size is 0, this is just a query for the
+ * device table size and key size. Otherwise, if the User size is
+ * not equal to device table size or key size it's an error.
+ */
+ if (!user_indir_size && !user_key_size)
+ return 0;
+
+ if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
+ (user_key_size && (user_key_size != dev_key_size)))
+ return -EINVAL;
+
+ indir_bytes = user_indir_size * sizeof(indir[0]);
+ total_size = indir_bytes + user_key_size;
+ rss_config = kzalloc(total_size, GFP_USER);
+ if (!rss_config)
+ return -ENOMEM;
+
+ if (user_indir_size)
+ indir = (u32 *)rss_config;
+
+ if (user_key_size)
+ hkey = rss_config + indir_bytes;
+
+ ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey);
+ if (!ret) {
+ if (copy_to_user(useraddr +
+ offsetof(struct ethtool_rxfh, rss_config[0]),
+ rss_config, total_size))
ret = -EFAULT;
+ }
+
+ kfree(rss_config);
+
+ return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ void __user *useraddr)
+{
+ int ret;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxnfc rx_rings;
+ u32 user_indir_size = 0, dev_indir_size = 0, i;
+ u32 user_key_size = 0, dev_key_size = 0;
+ u32 *indir = NULL, indir_bytes = 0;
+ u8 *hkey = NULL;
+ u8 *rss_config;
+ u32 indir_offset, key_offset;
+ u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
+
+ if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) ||
+ !ops->get_rxnfc || !ops->set_rxfh)
+ return -EOPNOTSUPP;
+
+ if (ops->get_rxfh_indir_size)
+ dev_indir_size = ops->get_rxfh_indir_size(dev);
+
+ indir_offset = offsetof(struct ethtool_rxfh, indir_size);
+ if (copy_from_user(&user_indir_size,
+ useraddr + indir_offset,
+ sizeof(user_indir_size)))
+ return -EFAULT;
+
+ if (ops->get_rxfh_key_size)
+ dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
+
+ if ((dev_key_size + dev_indir_size) == 0)
+ return -EOPNOTSUPP;
+
+ key_offset = offsetof(struct ethtool_rxfh, key_size);
+ if (copy_from_user(&user_key_size,
+ useraddr + key_offset,
+ sizeof(user_key_size)))
+ return -EFAULT;
+
+ /* If either indir or hash key is valid, proceed further.
+ */
+ if ((user_indir_size && ((user_indir_size != 0xDEADBEEF) &&
+ user_indir_size != dev_indir_size)) ||
+ (user_key_size && (user_key_size != dev_key_size)))
+ return -EINVAL;
+
+ if (user_indir_size != 0xDEADBEEF)
+ indir_bytes = dev_indir_size * sizeof(indir[0]);
+
+ rss_config = kzalloc(indir_bytes + user_key_size, GFP_USER);
+ if (!rss_config)
+ return -ENOMEM;
+
+ rx_rings.cmd = ETHTOOL_GRXRINGS;
+ ret = ops->get_rxnfc(dev, &rx_rings, NULL);
+ if (ret)
+ goto out;
+
+ /* user_indir_size == 0 means reset the indir table to default.
+ * user_indir_size == 0xDEADBEEF means indir setting is not requested.
+ */
+ if (user_indir_size && user_indir_size != 0xDEADBEEF) {
+ indir = (u32 *)rss_config;
+ ret = ethtool_copy_validate_indir(indir,
+ useraddr + rss_cfg_offset,
+ &rx_rings,
+ user_indir_size);
+ if (ret)
goto out;
- }
+ } else if (user_indir_size == 0) {
+ indir = (u32 *)rss_config;
+ for (i = 0; i < dev_indir_size; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+ }
- /* Validate ring indices */
- for (i = 0; i < dev_size; i++) {
- if (indir[i] >= rx_rings.data) {
- ret = -EINVAL;
- goto out;
- }
+ if (user_key_size) {
+ hkey = rss_config + indir_bytes;
+ if (copy_from_user(hkey,
+ useraddr + rss_cfg_offset + indir_bytes,
+ user_key_size)) {
+ ret = -EFAULT;
+ goto out;
}
}
- ret = ops->set_rxfh_indir(dev, indir);
+ ret = ops->set_rxfh(dev, indir, hkey);
out:
- kfree(indir);
+ kfree(rss_config);
return ret;
}
@@ -1490,6 +1679,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
case ETHTOOL_GRXFHINDIR:
+ case ETHTOOL_GRSSH:
case ETHTOOL_GFEATURES:
case ETHTOOL_GCHANNELS:
case ETHTOOL_GET_TS_INFO:
@@ -1627,6 +1817,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SRXFHINDIR:
rc = ethtool_set_rxfh_indir(dev, useraddr);
break;
+ case ETHTOOL_GRSSH:
+ rc = ethtool_get_rxfh(dev, useraddr);
+ break;
+ case ETHTOOL_SRSSH:
+ rc = ethtool_set_rxfh(dev, useraddr);
+ break;
case ETHTOOL_GFEATURES:
rc = ethtool_get_features(dev, useraddr);
break;
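ethtool_get_rxfh() above implements a two-step protocol: a request with indir_size == key_size == 0 is a pure size query, and a second request whose sizes match the device's fetches the indirection table followed by the hash key in the variable-length rss_config[] tail. A rough userspace sketch of that handshake; the struct below mirrors only the fields the handler touches (the real struct ethtool_rxfh in linux/ethtool.h has more), and do_ethtool_ioctl() is a hypothetical stand-in for the SIOCETHTOOL plumbing, with most error handling elided:

#include <stdlib.h>
#include <stdint.h>

#define MY_ETHTOOL_GRSSH 0x00000046 /* value of ETHTOOL_GRSSH in linux/ethtool.h */

/* Simplified mirror of struct ethtool_rxfh -- illustrative only. */
struct rxfh_query {
	uint32_t cmd;          /* ETHTOOL_GRSSH */
	uint32_t indir_size;   /* in: entries allocated; out: device's */
	uint32_t key_size;     /* in: bytes allocated;   out: device's */
	uint8_t  rss_config[]; /* indir table (uint32_t[]), then hash key */
};

int do_ethtool_ioctl(void *req); /* hypothetical SIOCETHTOOL wrapper */

void *fetch_rss_config(void)
{
	struct rxfh_query probe = { .cmd = MY_ETHTOOL_GRSSH };
	struct rxfh_query *full;

	/* Step 1: both sizes zero -> kernel only fills in device sizes. */
	if (do_ethtool_ioctl(&probe))
		return NULL;

	/* Step 2: allocate indir words + key bytes and fetch both. */
	full = calloc(1, sizeof(*full) +
			 probe.indir_size * sizeof(uint32_t) +
			 probe.key_size);
	if (!full)
		return NULL;
	full->cmd = MY_ETHTOOL_GRSSH;
	full->indir_size = probe.indir_size; /* must match device exactly */
	full->key_size = probe.key_size;
	if (do_ethtool_ioctl(full)) {
		free(full);
		return NULL;
	}
	return full;
}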
diff --git a/net/core/filter.c b/net/core/filter.c
index 765556b..7067cb2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -45,6 +45,27 @@
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
+/* Registers */
+#define BPF_R0 regs[BPF_REG_0]
+#define BPF_R1 regs[BPF_REG_1]
+#define BPF_R2 regs[BPF_REG_2]
+#define BPF_R3 regs[BPF_REG_3]
+#define BPF_R4 regs[BPF_REG_4]
+#define BPF_R5 regs[BPF_REG_5]
+#define BPF_R6 regs[BPF_REG_6]
+#define BPF_R7 regs[BPF_REG_7]
+#define BPF_R8 regs[BPF_REG_8]
+#define BPF_R9 regs[BPF_REG_9]
+#define BPF_R10 regs[BPF_REG_10]
+
+/* Named registers */
+#define A regs[insn->a_reg]
+#define X regs[insn->x_reg]
+#define FP regs[BPF_REG_FP]
+#define ARG1 regs[BPF_REG_ARG1]
+#define CTX regs[BPF_REG_CTX]
+#define K insn->imm
+
/* No hurry in this branch
*
* Exported for the bpf jit load helper.
@@ -57,9 +78,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
ptr = skb_network_header(skb) + k - SKF_NET_OFF;
else if (k >= SKF_LL_OFF)
ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-
if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
return ptr;
+
return NULL;
}
@@ -68,6 +89,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
{
if (k >= 0)
return skb_header_pointer(skb, k, size, buffer);
+
return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
@@ -131,210 +153,208 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
* keep, 0 for none. @ctx is the data we are operating on, @insn is the
* array of filter instructions.
*/
-unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
+static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
{
u64 stack[MAX_BPF_STACK / sizeof(u64)];
u64 regs[MAX_BPF_REG], tmp;
- void *ptr;
- int off;
-
-#define K insn->imm
-#define A regs[insn->a_reg]
-#define X regs[insn->x_reg]
-#define R0 regs[0]
-
-#define CONT ({insn++; goto select_insn; })
-#define CONT_JMP ({insn++; goto select_insn; })
-
static const void *jumptable[256] = {
[0 ... 255] = &&default_label,
/* Now overwrite non-defaults ... */
-#define DL(A, B, C) [A|B|C] = &&A##_##B##_##C
- DL(BPF_ALU, BPF_ADD, BPF_X),
- DL(BPF_ALU, BPF_ADD, BPF_K),
- DL(BPF_ALU, BPF_SUB, BPF_X),
- DL(BPF_ALU, BPF_SUB, BPF_K),
- DL(BPF_ALU, BPF_AND, BPF_X),
- DL(BPF_ALU, BPF_AND, BPF_K),
- DL(BPF_ALU, BPF_OR, BPF_X),
- DL(BPF_ALU, BPF_OR, BPF_K),
- DL(BPF_ALU, BPF_LSH, BPF_X),
- DL(BPF_ALU, BPF_LSH, BPF_K),
- DL(BPF_ALU, BPF_RSH, BPF_X),
- DL(BPF_ALU, BPF_RSH, BPF_K),
- DL(BPF_ALU, BPF_XOR, BPF_X),
- DL(BPF_ALU, BPF_XOR, BPF_K),
- DL(BPF_ALU, BPF_MUL, BPF_X),
- DL(BPF_ALU, BPF_MUL, BPF_K),
- DL(BPF_ALU, BPF_MOV, BPF_X),
- DL(BPF_ALU, BPF_MOV, BPF_K),
- DL(BPF_ALU, BPF_DIV, BPF_X),
- DL(BPF_ALU, BPF_DIV, BPF_K),
- DL(BPF_ALU, BPF_MOD, BPF_X),
- DL(BPF_ALU, BPF_MOD, BPF_K),
- DL(BPF_ALU, BPF_NEG, 0),
- DL(BPF_ALU, BPF_END, BPF_TO_BE),
- DL(BPF_ALU, BPF_END, BPF_TO_LE),
- DL(BPF_ALU64, BPF_ADD, BPF_X),
- DL(BPF_ALU64, BPF_ADD, BPF_K),
- DL(BPF_ALU64, BPF_SUB, BPF_X),
- DL(BPF_ALU64, BPF_SUB, BPF_K),
- DL(BPF_ALU64, BPF_AND, BPF_X),
- DL(BPF_ALU64, BPF_AND, BPF_K),
- DL(BPF_ALU64, BPF_OR, BPF_X),
- DL(BPF_ALU64, BPF_OR, BPF_K),
- DL(BPF_ALU64, BPF_LSH, BPF_X),
- DL(BPF_ALU64, BPF_LSH, BPF_K),
- DL(BPF_ALU64, BPF_RSH, BPF_X),
- DL(BPF_ALU64, BPF_RSH, BPF_K),
- DL(BPF_ALU64, BPF_XOR, BPF_X),
- DL(BPF_ALU64, BPF_XOR, BPF_K),
- DL(BPF_ALU64, BPF_MUL, BPF_X),
- DL(BPF_ALU64, BPF_MUL, BPF_K),
- DL(BPF_ALU64, BPF_MOV, BPF_X),
- DL(BPF_ALU64, BPF_MOV, BPF_K),
- DL(BPF_ALU64, BPF_ARSH, BPF_X),
- DL(BPF_ALU64, BPF_ARSH, BPF_K),
- DL(BPF_ALU64, BPF_DIV, BPF_X),
- DL(BPF_ALU64, BPF_DIV, BPF_K),
- DL(BPF_ALU64, BPF_MOD, BPF_X),
- DL(BPF_ALU64, BPF_MOD, BPF_K),
- DL(BPF_ALU64, BPF_NEG, 0),
- DL(BPF_JMP, BPF_CALL, 0),
- DL(BPF_JMP, BPF_JA, 0),
- DL(BPF_JMP, BPF_JEQ, BPF_X),
- DL(BPF_JMP, BPF_JEQ, BPF_K),
- DL(BPF_JMP, BPF_JNE, BPF_X),
- DL(BPF_JMP, BPF_JNE, BPF_K),
- DL(BPF_JMP, BPF_JGT, BPF_X),
- DL(BPF_JMP, BPF_JGT, BPF_K),
- DL(BPF_JMP, BPF_JGE, BPF_X),
- DL(BPF_JMP, BPF_JGE, BPF_K),
- DL(BPF_JMP, BPF_JSGT, BPF_X),
- DL(BPF_JMP, BPF_JSGT, BPF_K),
- DL(BPF_JMP, BPF_JSGE, BPF_X),
- DL(BPF_JMP, BPF_JSGE, BPF_K),
- DL(BPF_JMP, BPF_JSET, BPF_X),
- DL(BPF_JMP, BPF_JSET, BPF_K),
- DL(BPF_JMP, BPF_EXIT, 0),
- DL(BPF_STX, BPF_MEM, BPF_B),
- DL(BPF_STX, BPF_MEM, BPF_H),
- DL(BPF_STX, BPF_MEM, BPF_W),
- DL(BPF_STX, BPF_MEM, BPF_DW),
- DL(BPF_STX, BPF_XADD, BPF_W),
- DL(BPF_STX, BPF_XADD, BPF_DW),
- DL(BPF_ST, BPF_MEM, BPF_B),
- DL(BPF_ST, BPF_MEM, BPF_H),
- DL(BPF_ST, BPF_MEM, BPF_W),
- DL(BPF_ST, BPF_MEM, BPF_DW),
- DL(BPF_LDX, BPF_MEM, BPF_B),
- DL(BPF_LDX, BPF_MEM, BPF_H),
- DL(BPF_LDX, BPF_MEM, BPF_W),
- DL(BPF_LDX, BPF_MEM, BPF_DW),
- DL(BPF_LD, BPF_ABS, BPF_W),
- DL(BPF_LD, BPF_ABS, BPF_H),
- DL(BPF_LD, BPF_ABS, BPF_B),
- DL(BPF_LD, BPF_IND, BPF_W),
- DL(BPF_LD, BPF_IND, BPF_H),
- DL(BPF_LD, BPF_IND, BPF_B),
+#define DL(A, B, C) [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
+ DL(ALU, ADD, X),
+ DL(ALU, ADD, K),
+ DL(ALU, SUB, X),
+ DL(ALU, SUB, K),
+ DL(ALU, AND, X),
+ DL(ALU, AND, K),
+ DL(ALU, OR, X),
+ DL(ALU, OR, K),
+ DL(ALU, LSH, X),
+ DL(ALU, LSH, K),
+ DL(ALU, RSH, X),
+ DL(ALU, RSH, K),
+ DL(ALU, XOR, X),
+ DL(ALU, XOR, K),
+ DL(ALU, MUL, X),
+ DL(ALU, MUL, K),
+ DL(ALU, MOV, X),
+ DL(ALU, MOV, K),
+ DL(ALU, DIV, X),
+ DL(ALU, DIV, K),
+ DL(ALU, MOD, X),
+ DL(ALU, MOD, K),
+ DL(ALU, NEG, 0),
+ DL(ALU, END, TO_BE),
+ DL(ALU, END, TO_LE),
+ DL(ALU64, ADD, X),
+ DL(ALU64, ADD, K),
+ DL(ALU64, SUB, X),
+ DL(ALU64, SUB, K),
+ DL(ALU64, AND, X),
+ DL(ALU64, AND, K),
+ DL(ALU64, OR, X),
+ DL(ALU64, OR, K),
+ DL(ALU64, LSH, X),
+ DL(ALU64, LSH, K),
+ DL(ALU64, RSH, X),
+ DL(ALU64, RSH, K),
+ DL(ALU64, XOR, X),
+ DL(ALU64, XOR, K),
+ DL(ALU64, MUL, X),
+ DL(ALU64, MUL, K),
+ DL(ALU64, MOV, X),
+ DL(ALU64, MOV, K),
+ DL(ALU64, ARSH, X),
+ DL(ALU64, ARSH, K),
+ DL(ALU64, DIV, X),
+ DL(ALU64, DIV, K),
+ DL(ALU64, MOD, X),
+ DL(ALU64, MOD, K),
+ DL(ALU64, NEG, 0),
+ DL(JMP, CALL, 0),
+ DL(JMP, JA, 0),
+ DL(JMP, JEQ, X),
+ DL(JMP, JEQ, K),
+ DL(JMP, JNE, X),
+ DL(JMP, JNE, K),
+ DL(JMP, JGT, X),
+ DL(JMP, JGT, K),
+ DL(JMP, JGE, X),
+ DL(JMP, JGE, K),
+ DL(JMP, JSGT, X),
+ DL(JMP, JSGT, K),
+ DL(JMP, JSGE, X),
+ DL(JMP, JSGE, K),
+ DL(JMP, JSET, X),
+ DL(JMP, JSET, K),
+ DL(JMP, EXIT, 0),
+ DL(STX, MEM, B),
+ DL(STX, MEM, H),
+ DL(STX, MEM, W),
+ DL(STX, MEM, DW),
+ DL(STX, XADD, W),
+ DL(STX, XADD, DW),
+ DL(ST, MEM, B),
+ DL(ST, MEM, H),
+ DL(ST, MEM, W),
+ DL(ST, MEM, DW),
+ DL(LDX, MEM, B),
+ DL(LDX, MEM, H),
+ DL(LDX, MEM, W),
+ DL(LDX, MEM, DW),
+ DL(LD, ABS, W),
+ DL(LD, ABS, H),
+ DL(LD, ABS, B),
+ DL(LD, IND, W),
+ DL(LD, IND, H),
+ DL(LD, IND, B),
#undef DL
};
+ void *ptr;
+ int off;
- regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
- regs[ARG1_REG] = (u64) (unsigned long) ctx;
+#define CONT ({ insn++; goto select_insn; })
+#define CONT_JMP ({ insn++; goto select_insn; })
+
+ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+ ARG1 = (u64) (unsigned long) ctx;
+
+ /* Register for user BPF programs need to be reset first. */
+ regs[BPF_REG_A] = 0;
+ regs[BPF_REG_X] = 0;
select_insn:
goto *jumptable[insn->code];
/* ALU */
#define ALU(OPCODE, OP) \
- BPF_ALU64_##OPCODE##_BPF_X: \
+ ALU64_##OPCODE##_X: \
A = A OP X; \
CONT; \
- BPF_ALU_##OPCODE##_BPF_X: \
+ ALU_##OPCODE##_X: \
A = (u32) A OP (u32) X; \
CONT; \
- BPF_ALU64_##OPCODE##_BPF_K: \
+ ALU64_##OPCODE##_K: \
A = A OP K; \
CONT; \
- BPF_ALU_##OPCODE##_BPF_K: \
+ ALU_##OPCODE##_K: \
A = (u32) A OP (u32) K; \
CONT;
- ALU(BPF_ADD, +)
- ALU(BPF_SUB, -)
- ALU(BPF_AND, &)
- ALU(BPF_OR, |)
- ALU(BPF_LSH, <<)
- ALU(BPF_RSH, >>)
- ALU(BPF_XOR, ^)
- ALU(BPF_MUL, *)
+ ALU(ADD, +)
+ ALU(SUB, -)
+ ALU(AND, &)
+ ALU(OR, |)
+ ALU(LSH, <<)
+ ALU(RSH, >>)
+ ALU(XOR, ^)
+ ALU(MUL, *)
#undef ALU
- BPF_ALU_BPF_NEG_0:
+ ALU_NEG_0:
A = (u32) -A;
CONT;
- BPF_ALU64_BPF_NEG_0:
+ ALU64_NEG_0:
A = -A;
CONT;
- BPF_ALU_BPF_MOV_BPF_X:
+ ALU_MOV_X:
A = (u32) X;
CONT;
- BPF_ALU_BPF_MOV_BPF_K:
+ ALU_MOV_K:
A = (u32) K;
CONT;
- BPF_ALU64_BPF_MOV_BPF_X:
+ ALU64_MOV_X:
A = X;
CONT;
- BPF_ALU64_BPF_MOV_BPF_K:
+ ALU64_MOV_K:
A = K;
CONT;
- BPF_ALU64_BPF_ARSH_BPF_X:
+ ALU64_ARSH_X:
(*(s64 *) &A) >>= X;
CONT;
- BPF_ALU64_BPF_ARSH_BPF_K:
+ ALU64_ARSH_K:
(*(s64 *) &A) >>= K;
CONT;
- BPF_ALU64_BPF_MOD_BPF_X:
+ ALU64_MOD_X:
+ if (unlikely(X == 0))
+ return 0;
tmp = A;
- if (X)
- A = do_div(tmp, X);
+ A = do_div(tmp, X);
CONT;
- BPF_ALU_BPF_MOD_BPF_X:
+ ALU_MOD_X:
+ if (unlikely(X == 0))
+ return 0;
tmp = (u32) A;
- if (X)
- A = do_div(tmp, (u32) X);
+ A = do_div(tmp, (u32) X);
CONT;
- BPF_ALU64_BPF_MOD_BPF_K:
+ ALU64_MOD_K:
tmp = A;
- if (K)
- A = do_div(tmp, K);
+ A = do_div(tmp, K);
CONT;
- BPF_ALU_BPF_MOD_BPF_K:
+ ALU_MOD_K:
tmp = (u32) A;
- if (K)
- A = do_div(tmp, (u32) K);
+ A = do_div(tmp, (u32) K);
CONT;
- BPF_ALU64_BPF_DIV_BPF_X:
- if (X)
- do_div(A, X);
+ ALU64_DIV_X:
+ if (unlikely(X == 0))
+ return 0;
+ do_div(A, X);
CONT;
- BPF_ALU_BPF_DIV_BPF_X:
+ ALU_DIV_X:
+ if (unlikely(X == 0))
+ return 0;
tmp = (u32) A;
- if (X)
- do_div(tmp, (u32) X);
+ do_div(tmp, (u32) X);
A = (u32) tmp;
CONT;
- BPF_ALU64_BPF_DIV_BPF_K:
- if (K)
- do_div(A, K);
+ ALU64_DIV_K:
+ do_div(A, K);
CONT;
- BPF_ALU_BPF_DIV_BPF_K:
+ ALU_DIV_K:
tmp = (u32) A;
- if (K)
- do_div(tmp, (u32) K);
+ do_div(tmp, (u32) K);
A = (u32) tmp;
CONT;
- BPF_ALU_BPF_END_BPF_TO_BE:
+ ALU_END_TO_BE:
switch (K) {
case 16:
A = (__force u16) cpu_to_be16(A);
@@ -347,7 +367,7 @@ select_insn:
break;
}
CONT;
- BPF_ALU_BPF_END_BPF_TO_LE:
+ ALU_END_TO_LE:
switch (K) {
case 16:
A = (__force u16) cpu_to_le16(A);
@@ -362,142 +382,144 @@ select_insn:
CONT;
/* CALL */
- BPF_JMP_BPF_CALL_0:
- /* Function call scratches R1-R5 registers, preserves R6-R9,
- * and stores return value into R0.
+ JMP_CALL_0:
+ /* Function call scratches BPF_R1-BPF_R5 registers,
+ * preserves BPF_R6-BPF_R9, and stores return value
+ * into BPF_R0.
*/
- R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
- regs[4], regs[5]);
+ BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
+ BPF_R4, BPF_R5);
CONT;
/* JMP */
- BPF_JMP_BPF_JA_0:
+ JMP_JA_0:
insn += insn->off;
CONT;
- BPF_JMP_BPF_JEQ_BPF_X:
+ JMP_JEQ_X:
if (A == X) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JEQ_BPF_K:
+ JMP_JEQ_K:
if (A == K) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JNE_BPF_X:
+ JMP_JNE_X:
if (A != X) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JNE_BPF_K:
+ JMP_JNE_K:
if (A != K) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JGT_BPF_X:
+ JMP_JGT_X:
if (A > X) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JGT_BPF_K:
+ JMP_JGT_K:
if (A > K) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JGE_BPF_X:
+ JMP_JGE_X:
if (A >= X) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JGE_BPF_K:
+ JMP_JGE_K:
if (A >= K) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JSGT_BPF_X:
- if (((s64)A) > ((s64)X)) {
+ JMP_JSGT_X:
+ if (((s64) A) > ((s64) X)) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JSGT_BPF_K:
- if (((s64)A) > ((s64)K)) {
+ JMP_JSGT_K:
+ if (((s64) A) > ((s64) K)) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JSGE_BPF_X:
- if (((s64)A) >= ((s64)X)) {
+ JMP_JSGE_X:
+ if (((s64) A) >= ((s64) X)) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JSGE_BPF_K:
- if (((s64)A) >= ((s64)K)) {
+ JMP_JSGE_K:
+ if (((s64) A) >= ((s64) K)) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JSET_BPF_X:
+ JMP_JSET_X:
if (A & X) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_JSET_BPF_K:
+ JMP_JSET_K:
if (A & K) {
insn += insn->off;
CONT_JMP;
}
CONT;
- BPF_JMP_BPF_EXIT_0:
- return R0;
+ JMP_EXIT_0:
+ return BPF_R0;
/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE) \
- BPF_STX_BPF_MEM_##SIZEOP: \
+ STX_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (A + insn->off) = X; \
CONT; \
- BPF_ST_BPF_MEM_##SIZEOP: \
+ ST_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (A + insn->off) = K; \
CONT; \
- BPF_LDX_BPF_MEM_##SIZEOP: \
+ LDX_MEM_##SIZEOP: \
A = *(SIZE *)(unsigned long) (X + insn->off); \
CONT;
- LDST(BPF_B, u8)
- LDST(BPF_H, u16)
- LDST(BPF_W, u32)
- LDST(BPF_DW, u64)
+ LDST(B, u8)
+ LDST(H, u16)
+ LDST(W, u32)
+ LDST(DW, u64)
#undef LDST
- BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
+ STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
atomic_add((u32) X, (atomic_t *)(unsigned long)
(A + insn->off));
CONT;
- BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
+ STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
atomic64_add((u64) X, (atomic64_t *)(unsigned long)
(A + insn->off));
CONT;
- BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+ LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
off = K;
load_word:
- /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
- * appearing in the programs where ctx == skb. All programs
- * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter()
- * saves it in R6, internal BPF verifier will check that
- * R6 == ctx.
+ /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
+ * only appearing in the programs where ctx ==
+ * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
+ * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
+ * internal BPF verifier will check that BPF_R6 ==
+ * ctx.
*
- * BPF_ABS and BPF_IND are wrappers of function calls, so
- * they scratch R1-R5 registers, preserve R6-R9, and store
- * return value into R0.
+ * BPF_ABS and BPF_IND are wrappers of function calls,
+ * so they scratch BPF_R1-BPF_R5 registers, preserve
+ * BPF_R6-BPF_R9, and store return value into BPF_R0.
*
* Implicit input:
* ctx
@@ -507,39 +529,39 @@ load_word:
* K == 32-bit immediate
*
* Output:
- * R0 - 8/16/32-bit skb data converted to cpu endianness
+ * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
*/
ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
if (likely(ptr != NULL)) {
- R0 = get_unaligned_be32(ptr);
+ BPF_R0 = get_unaligned_be32(ptr);
CONT;
}
return 0;
- BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+ LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
off = K;
load_half:
ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
if (likely(ptr != NULL)) {
- R0 = get_unaligned_be16(ptr);
+ BPF_R0 = get_unaligned_be16(ptr);
CONT;
}
return 0;
- BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
+ LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
off = K;
load_byte:
ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
if (likely(ptr != NULL)) {
- R0 = *(u8 *)ptr;
+ BPF_R0 = *(u8 *)ptr;
CONT;
}
return 0;
- BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+ LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
off = K + X;
goto load_word;
- BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+ LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
off = K + X;
goto load_half;
- BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
+ LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
off = K + X;
goto load_byte;
@@ -547,24 +569,8 @@ load_byte:
/* If we ever reach this, we have a bug somewhere. */
WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
return 0;
-#undef CONT_JMP
-#undef CONT
-
-#undef R0
-#undef X
-#undef A
-#undef K
}
-u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
- const struct sock_filter_int *insni)
- __attribute__ ((alias ("__sk_run_filter")));
-
-u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
- const struct sock_filter_int *insni)
- __attribute__ ((alias ("__sk_run_filter")));
-EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
-
/* Helper to find the offset of pkt_type in sk_buff structure. We want
* to make sure its still a 3bit field starting at a byte boundary;
* taken from arch/x86/net/bpf_jit_comp.c.
@@ -585,64 +591,67 @@ static unsigned int pkt_type_offset(void)
return -1;
}
-static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
- struct sk_buff *skb = (struct sk_buff *)(long) ctx;
-
- return __skb_get_poff(skb);
+ return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}
-static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
- struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
- if (A > skb->len - sizeof(struct nlattr))
+ if (skb->len < sizeof(struct nlattr))
return 0;
- nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
+ if (a > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
-static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
- struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
- if (A > skb->len - sizeof(struct nlattr))
+ if (skb->len < sizeof(struct nlattr))
return 0;
- nla = (struct nlattr *) &skb->data[A];
- if (nla->nla_len > A - skb->len)
+ if (a > skb->len - sizeof(struct nlattr))
return 0;
- nla = nla_find_nested(nla, X);
+ nla = (struct nlattr *) &skb->data[a];
+ if (nla->nla_len > skb->len - a)
+ return 0;
+
+ nla = nla_find_nested(nla, x);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
-static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
return raw_smp_processor_id();
}
-/* Register mappings for user programs. */
-#define A_REG 0
-#define X_REG 7
-#define TMP_REG 8
-#define ARG2_REG 2
-#define ARG3_REG 3
+/* note that this only generates 32-bit random numbers */
+static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+{
+ return prandom_u32();
+}
static bool convert_bpf_extensions(struct sock_filter *fp,
struct sock_filter_int **insnp)
@@ -653,57 +662,46 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_PROTOCOL:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
- insn->code = BPF_LDX | BPF_MEM | BPF_H;
- insn->a_reg = A_REG;
- insn->x_reg = CTX_REG;
- insn->off = offsetof(struct sk_buff, protocol);
+ /* A = *(u16 *) (ctx + offsetof(protocol)) */
+ *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+ offsetof(struct sk_buff, protocol));
insn++;
/* A = ntohs(A) [emitting a nop or swap16] */
insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
- insn->a_reg = A_REG;
+ insn->a_reg = BPF_REG_A;
insn->imm = 16;
break;
case SKF_AD_OFF + SKF_AD_PKTTYPE:
- insn->code = BPF_LDX | BPF_MEM | BPF_B;
- insn->a_reg = A_REG;
- insn->x_reg = CTX_REG;
- insn->off = pkt_type_offset();
+ *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
+ pkt_type_offset());
if (insn->off < 0)
return false;
insn++;
- insn->code = BPF_ALU | BPF_AND | BPF_K;
- insn->a_reg = A_REG;
- insn->imm = PKT_TYPE_MAX;
+ *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
break;
case SKF_AD_OFF + SKF_AD_IFINDEX:
case SKF_AD_OFF + SKF_AD_HATYPE:
- if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
- insn->code = BPF_LDX | BPF_MEM | BPF_DW;
- else
- insn->code = BPF_LDX | BPF_MEM | BPF_W;
- insn->a_reg = TMP_REG;
- insn->x_reg = CTX_REG;
- insn->off = offsetof(struct sk_buff, dev);
+ *insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
+ BPF_REG_TMP, BPF_REG_CTX,
+ offsetof(struct sk_buff, dev));
insn++;
- insn->code = BPF_JMP | BPF_JNE | BPF_K;
- insn->a_reg = TMP_REG;
- insn->imm = 0;
- insn->off = 1;
+ /* if (tmp != 0) goto pc+1 */
+ *insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
insn++;
- insn->code = BPF_JMP | BPF_EXIT;
+ *insn = BPF_EXIT_INSN();
insn++;
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
- insn->a_reg = A_REG;
- insn->x_reg = TMP_REG;
+ insn->a_reg = BPF_REG_A;
+ insn->x_reg = BPF_REG_TMP;
if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
insn->code = BPF_LDX | BPF_MEM | BPF_W;
@@ -717,55 +715,45 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
- insn->code = BPF_LDX | BPF_MEM | BPF_W;
- insn->a_reg = A_REG;
- insn->x_reg = CTX_REG;
- insn->off = offsetof(struct sk_buff, mark);
+ *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+ offsetof(struct sk_buff, mark));
break;
case SKF_AD_OFF + SKF_AD_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- insn->code = BPF_LDX | BPF_MEM | BPF_W;
- insn->a_reg = A_REG;
- insn->x_reg = CTX_REG;
- insn->off = offsetof(struct sk_buff, hash);
+ *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+ offsetof(struct sk_buff, hash));
break;
case SKF_AD_OFF + SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
- insn->code = BPF_LDX | BPF_MEM | BPF_H;
- insn->a_reg = A_REG;
- insn->x_reg = CTX_REG;
- insn->off = offsetof(struct sk_buff, queue_mapping);
+ *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+ offsetof(struct sk_buff, queue_mapping));
break;
case SKF_AD_OFF + SKF_AD_VLAN_TAG:
case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- insn->code = BPF_LDX | BPF_MEM | BPF_H;
- insn->a_reg = A_REG;
- insn->x_reg = CTX_REG;
- insn->off = offsetof(struct sk_buff, vlan_tci);
+ /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+ *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+ offsetof(struct sk_buff, vlan_tci));
insn++;
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
- insn->code = BPF_ALU | BPF_AND | BPF_K;
- insn->a_reg = A_REG;
- insn->imm = ~VLAN_TAG_PRESENT;
+ *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
+ ~VLAN_TAG_PRESENT);
} else {
- insn->code = BPF_ALU | BPF_RSH | BPF_K;
- insn->a_reg = A_REG;
- insn->imm = 12;
+ /* A >>= 12 */
+ *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
insn++;
- insn->code = BPF_ALU | BPF_AND | BPF_K;
- insn->a_reg = A_REG;
- insn->imm = 1;
+ /* A &= 1 */
+ *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
}
break;
@@ -773,22 +761,17 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_NLATTR:
case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
case SKF_AD_OFF + SKF_AD_CPU:
+ case SKF_AD_OFF + SKF_AD_RANDOM:
/* arg1 = ctx */
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = ARG1_REG;
- insn->x_reg = CTX_REG;
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX);
insn++;
/* arg2 = A */
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = ARG2_REG;
- insn->x_reg = A_REG;
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A);
insn++;
/* arg3 = X */
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = ARG3_REG;
- insn->x_reg = X_REG;
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X);
insn++;
/* Emit call(ctx, arg2=A, arg3=X) */
@@ -806,13 +789,15 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_CPU:
insn->imm = __get_raw_cpu_id - __bpf_call_base;
break;
+ case SKF_AD_OFF + SKF_AD_RANDOM:
+ insn->imm = __get_random_u32 - __bpf_call_base;
+ break;
}
break;
case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
- insn->code = BPF_ALU | BPF_XOR | BPF_X;
- insn->a_reg = A_REG;
- insn->x_reg = X_REG;
+ /* A ^= X */
+ *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
break;
default:
@@ -862,7 +847,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
u8 bpf_src;
BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
- BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
+ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
if (len <= 0 || len >= BPF_MAXINSNS)
return -EINVAL;
@@ -878,9 +863,7 @@ do_pass:
fp = prog;
if (new_insn) {
- new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- new_insn->a_reg = CTX_REG;
- new_insn->x_reg = ARG1_REG;
+ *new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1);
}
new_insn++;
@@ -930,8 +913,8 @@ do_pass:
break;
insn->code = fp->code;
- insn->a_reg = A_REG;
- insn->x_reg = X_REG;
+ insn->a_reg = BPF_REG_A;
+ insn->x_reg = BPF_REG_X;
insn->imm = fp->k;
break;
@@ -965,16 +948,16 @@ do_pass:
* in compare insn.
*/
insn->code = BPF_ALU | BPF_MOV | BPF_K;
- insn->a_reg = TMP_REG;
+ insn->a_reg = BPF_REG_TMP;
insn->imm = fp->k;
insn++;
- insn->a_reg = A_REG;
- insn->x_reg = TMP_REG;
+ insn->a_reg = BPF_REG_A;
+ insn->x_reg = BPF_REG_TMP;
bpf_src = BPF_X;
} else {
- insn->a_reg = A_REG;
- insn->x_reg = X_REG;
+ insn->a_reg = BPF_REG_A;
+ insn->x_reg = BPF_REG_X;
insn->imm = fp->k;
bpf_src = BPF_SRC(fp->code);
}
@@ -1008,34 +991,28 @@ do_pass:
/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B:
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = TMP_REG;
- insn->x_reg = A_REG;
+ /* tmp = A */
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
insn++;
- insn->code = BPF_LD | BPF_ABS | BPF_B;
- insn->a_reg = A_REG;
- insn->imm = fp->k;
+ /* A = BPF_R0 = *(u8 *) (skb->data + K) */
+ *insn = BPF_LD_ABS(BPF_B, fp->k);
insn++;
- insn->code = BPF_ALU | BPF_AND | BPF_K;
- insn->a_reg = A_REG;
- insn->imm = 0xf;
+ /* A &= 0xf */
+ *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
insn++;
- insn->code = BPF_ALU | BPF_LSH | BPF_K;
- insn->a_reg = A_REG;
- insn->imm = 2;
+ /* A <<= 2 */
+ *insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
insn++;
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = X_REG;
- insn->x_reg = A_REG;
+ /* X = A */
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
insn++;
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = A_REG;
- insn->x_reg = TMP_REG;
+ /* A = tmp */
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP);
break;
/* RET_K, RET_A are remaped into 2 insns. */
@@ -1045,19 +1022,20 @@ do_pass:
(BPF_RVAL(fp->code) == BPF_K ?
BPF_K : BPF_X);
insn->a_reg = 0;
- insn->x_reg = A_REG;
+ insn->x_reg = BPF_REG_A;
insn->imm = fp->k;
insn++;
- insn->code = BPF_JMP | BPF_EXIT;
+ *insn = BPF_EXIT_INSN();
break;
/* Store to stack. */
case BPF_ST:
case BPF_STX:
insn->code = BPF_STX | BPF_MEM | BPF_W;
- insn->a_reg = FP_REG;
- insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG;
+ insn->a_reg = BPF_REG_FP;
+ insn->x_reg = fp->code == BPF_ST ?
+ BPF_REG_A : BPF_REG_X;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break;
@@ -1066,8 +1044,8 @@ do_pass:
case BPF_LDX | BPF_MEM:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
- A_REG : X_REG;
- insn->x_reg = FP_REG;
+ BPF_REG_A : BPF_REG_X;
+ insn->x_reg = BPF_REG_FP;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break;
@@ -1076,22 +1054,18 @@ do_pass:
case BPF_LDX | BPF_IMM:
insn->code = BPF_ALU | BPF_MOV | BPF_K;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
- A_REG : X_REG;
+ BPF_REG_A : BPF_REG_X;
insn->imm = fp->k;
break;
/* X = A */
case BPF_MISC | BPF_TAX:
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = X_REG;
- insn->x_reg = A_REG;
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
break;
/* A = X */
case BPF_MISC | BPF_TXA:
- insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
- insn->a_reg = A_REG;
- insn->x_reg = X_REG;
+ *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X);
break;
/* A = skb->len or X = skb->len */
@@ -1099,17 +1073,15 @@ do_pass:
case BPF_LDX | BPF_W | BPF_LEN:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
- A_REG : X_REG;
- insn->x_reg = CTX_REG;
+ BPF_REG_A : BPF_REG_X;
+ insn->x_reg = BPF_REG_CTX;
insn->off = offsetof(struct sk_buff, len);
break;
/* access seccomp_data fields */
case BPF_LDX | BPF_ABS | BPF_W:
- insn->code = BPF_LDX | BPF_MEM | BPF_W;
- insn->a_reg = A_REG;
- insn->x_reg = CTX_REG;
- insn->off = fp->k;
+ /* A = *(u32 *) (ctx + K) */
+ *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
break;
default:
@@ -1356,6 +1328,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
ANCILLARY(VLAN_TAG);
ANCILLARY(VLAN_TAG_PRESENT);
ANCILLARY(PAY_OFFSET);
+ ANCILLARY(RANDOM);
}
/* ancillary operation unknown or unsupported */
@@ -1415,7 +1388,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
sk_release_orig_filter(fp);
- bpf_jit_free(fp);
+ sk_filter_free(fp);
}
/**
@@ -1453,7 +1426,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
if (fp_new) {
- memcpy(fp_new, fp, sizeof(struct sk_filter));
+ *fp_new = *fp;
/* As we're kepping orig_prog in fp_new along,
* we need to make sure we're not evicting it
* from the old fp.
@@ -1515,7 +1488,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
goto out_err_free;
}
- fp->bpf_func = sk_run_filter_int_skb;
fp->len = new_len;
/* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
@@ -1528,6 +1500,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
*/
goto out_err_free;
+ sk_filter_select_runtime(fp);
+
kfree(old_prog);
return fp;
@@ -1542,6 +1516,33 @@ out_err:
return ERR_PTR(err);
}
+void __weak bpf_int_jit_compile(struct sk_filter *prog)
+{
+}
+
+/**
+ * sk_filter_select_runtime - select execution runtime for BPF program
+ * @fp: sk_filter populated with internal BPF program
+ *
+ * try to JIT internal BPF program, if JIT is not available select interpreter
+ * BPF program will be executed via SK_RUN_FILTER() macro
+ */
+void sk_filter_select_runtime(struct sk_filter *fp)
+{
+ fp->bpf_func = (void *) __sk_run_filter;
+
+ /* Probe if internal BPF can be JITed */
+ bpf_int_jit_compile(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
+
+/* free internal BPF program */
+void sk_filter_free(struct sk_filter *fp)
+{
+ bpf_jit_free(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_free);
+
static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
struct sock *sk)
{
@@ -1737,10 +1738,10 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
[BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
- [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
+ [BPF_S_ANC_RANDOM] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
[BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
[BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
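The renamed jumptable in __sk_run_filter() rests on GCC's labels-as-values extension: &&label takes the address of a label, and goto *table[opcode] dispatches each instruction with a single indirect jump instead of a switch. A toy interpreter showing the same dispatch scheme, including the CONT-style statement-expression macro:

#include <stdio.h>

enum { OP_ADD, OP_MUL, OP_END };

static int run(const unsigned char *prog)
{
	static const void *jumptable[] = {
		[OP_ADD] = &&do_add,
		[OP_MUL] = &&do_mul,
		[OP_END] = &&do_end,
	};
	int acc = 1;

/* Advance and re-dispatch, exactly the shape of CONT above. */
#define CONT ({ prog++; goto select_insn; })
select_insn:
	goto *jumptable[*prog];
do_add:	acc += 2; CONT;
do_mul:	acc *= 3; CONT;
do_end:	return acc;
#undef CONT
}

int main(void)
{
	unsigned char prog[] = { OP_ADD, OP_MUL, OP_END };
	printf("%d\n", run(prog)); /* (1 + 2) * 3 = 9 */
	return 0;
}

The DL() macro change above is the same idea applied to naming: token-pasting builds both the table index (BPF_ALU|BPF_ADD|BPF_X) and the label (ALU_ADD_X) from one shorter argument list.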
diff --git a/net/core/flow.c b/net/core/flow.c
index 31cfb36..a0348fd 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -455,6 +455,8 @@ int flow_cache_init(struct net *net)
if (!fc->percpu)
return -ENOMEM;
+ cpu_notifier_register_begin();
+
for_each_online_cpu(i) {
if (flow_cache_cpu_prepare(fc, i))
goto err;
@@ -462,7 +464,9 @@ int flow_cache_init(struct net *net)
fc->hotcpu_notifier = (struct notifier_block){
.notifier_call = flow_cache_cpu,
};
- register_hotcpu_notifier(&fc->hotcpu_notifier);
+ __register_hotcpu_notifier(&fc->hotcpu_notifier);
+
+ cpu_notifier_register_done();
setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
(unsigned long) fc);
@@ -478,6 +482,8 @@ err:
fcp->hash_table = NULL;
}
+ cpu_notifier_register_done();
+
free_percpu(fc->percpu);
fc->percpu = NULL;
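The flow.c change closes a CPU-hotplug race: without holding the notifier lock, a CPU could come online between the for_each_online_cpu() setup loop and the notifier registration and be seen by neither. cpu_notifier_register_begin()/done() keep hotplug quiescent across both steps. A rough pthread analogy of the pattern, not kernel code:

#include <pthread.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of cpu_notifier_register_begin()/_done(): take the same
 * lock hotplug events take, so the "online" set cannot change between
 * preparing every current CPU and subscribing to future changes. */
void setup_percpu(void (*prepare)(int cpu), void (*subscribe)(void))
{
	pthread_mutex_lock(&hotplug_lock);    /* cpu_notifier_register_begin() */
	for (int cpu = 0; cpu < 4; cpu++)     /* for_each_online_cpu(); 4 is a
						 placeholder online count */
		prepare(cpu);
	subscribe();                          /* __register_hotcpu_notifier() */
	pthread_mutex_unlock(&hotplug_lock);  /* cpu_notifier_register_done() */
}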
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4623962..1cac29e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -802,7 +802,7 @@ exit:
kobject_put(kobj);
return error;
}
-#endif /* CONFIG_SYFS */
+#endif /* CONFIG_SYSFS */
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 81d3a9a..05e949d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -273,7 +273,7 @@ static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp;
- LIST_HEAD(net_kill_list);
+ struct list_head net_kill_list;
LIST_HEAD(net_exit_list);
/* Atomically snapshot the list of namespaces to cleanup */
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 719efd5..22931e1 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,7 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
- return css_cls_state(task_css(p, net_cls_subsys_id));
+ return css_cls_state(task_css(p, net_cls_cgrp_id));
}
EXPORT_SYMBOL_GPL(task_cls_state);
@@ -73,7 +73,7 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
void *v = (void *)(unsigned long)cs->classid;
struct task_struct *p;
- cgroup_taskset_for_each(p, css, tset) {
+ cgroup_taskset_for_each(p, tset) {
task_lock(p);
iterate_fd(p->files, 0, update_classid, v);
task_unlock(p);
@@ -102,19 +102,10 @@ static struct cftype ss_files[] = {
{ } /* terminate */
};
-struct cgroup_subsys net_cls_subsys = {
- .name = "net_cls",
+struct cgroup_subsys net_cls_cgrp_subsys = {
.css_alloc = cgrp_css_alloc,
.css_online = cgrp_css_online,
.css_free = cgrp_css_free,
.attach = cgrp_attach,
- .subsys_id = net_cls_subsys_id,
.base_cftypes = ss_files,
- .module = THIS_MODULE,
};
-
-static int __init init_netclassid_cgroup(void)
-{
- return cgroup_load_subsys(&net_cls_subsys);
-}
-__initcall(init_netclassid_cgroup);
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 9043cae..3825f66 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -186,7 +186,7 @@ static int read_priomap(struct seq_file *sf, void *v)
}
static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
- const char *buffer)
+ char *buffer)
{
char devname[IFNAMSIZ + 1];
struct net_device *dev;
@@ -224,7 +224,7 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
struct task_struct *p;
void *v = (void *)(unsigned long)css->cgroup->id;
- cgroup_taskset_for_each(p, css, tset) {
+ cgroup_taskset_for_each(p, tset) {
task_lock(p);
iterate_fd(p->files, 0, update_netprio, v);
task_unlock(p);
@@ -244,15 +244,12 @@ static struct cftype ss_files[] = {
{ } /* terminate */
};
-struct cgroup_subsys net_prio_subsys = {
- .name = "net_prio",
+struct cgroup_subsys net_prio_cgrp_subsys = {
.css_alloc = cgrp_css_alloc,
.css_online = cgrp_css_online,
.css_free = cgrp_css_free,
.attach = net_prio_attach,
- .subsys_id = net_prio_subsys_id,
.base_cftypes = ss_files,
- .module = THIS_MODULE,
};
static int netprio_device_event(struct notifier_block *unused,
@@ -283,37 +280,9 @@ static struct notifier_block netprio_device_notifier = {
static int __init init_cgroup_netprio(void)
{
- int ret;
-
- ret = cgroup_load_subsys(&net_prio_subsys);
- if (ret)
- goto out;
-
register_netdevice_notifier(&netprio_device_notifier);
-
-out:
- return ret;
-}
-
-static void __exit exit_cgroup_netprio(void)
-{
- struct netprio_map *old;
- struct net_device *dev;
-
- unregister_netdevice_notifier(&netprio_device_notifier);
-
- cgroup_unload_subsys(&net_prio_subsys);
-
- rtnl_lock();
- for_each_netdev(&init_net, dev) {
- old = rtnl_dereference(dev->priomap);
- RCU_INIT_POINTER(dev->priomap, NULL);
- if (old)
- kfree_rcu(old, rcu);
- }
- rtnl_unlock();
+ return 0;
}
-module_init(init_cgroup_netprio);
-module_exit(exit_cgroup_netprio);
+subsys_initcall(init_cgroup_netprio);
MODULE_LICENSE("GPL v2");
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d0dac57..fc17a9d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -573,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
is_zero_ether_addr(pkt_dev->src_mac) ?
pkt_dev->odev->dev_addr : pkt_dev->src_mac);
- seq_printf(seq, "dst_mac: ");
+ seq_puts(seq, "dst_mac: ");
seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
seq_printf(seq,
@@ -588,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->nr_labels) {
unsigned int i;
- seq_printf(seq, " mpls: ");
+ seq_puts(seq, " mpls: ");
for (i = 0; i < pkt_dev->nr_labels; i++)
seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
i == pkt_dev->nr_labels-1 ? "\n" : ", ");
@@ -613,67 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->node >= 0)
seq_printf(seq, " node: %d\n", pkt_dev->node);
- seq_printf(seq, " Flags: ");
+ seq_puts(seq, " Flags: ");
if (pkt_dev->flags & F_IPV6)
- seq_printf(seq, "IPV6 ");
+ seq_puts(seq, "IPV6 ");
if (pkt_dev->flags & F_IPSRC_RND)
- seq_printf(seq, "IPSRC_RND ");
+ seq_puts(seq, "IPSRC_RND ");
if (pkt_dev->flags & F_IPDST_RND)
- seq_printf(seq, "IPDST_RND ");
+ seq_puts(seq, "IPDST_RND ");
if (pkt_dev->flags & F_TXSIZE_RND)
- seq_printf(seq, "TXSIZE_RND ");
+ seq_puts(seq, "TXSIZE_RND ");
if (pkt_dev->flags & F_UDPSRC_RND)
- seq_printf(seq, "UDPSRC_RND ");
+ seq_puts(seq, "UDPSRC_RND ");
if (pkt_dev->flags & F_UDPDST_RND)
- seq_printf(seq, "UDPDST_RND ");
+ seq_puts(seq, "UDPDST_RND ");
if (pkt_dev->flags & F_UDPCSUM)
- seq_printf(seq, "UDPCSUM ");
+ seq_puts(seq, "UDPCSUM ");
if (pkt_dev->flags & F_MPLS_RND)
- seq_printf(seq, "MPLS_RND ");
+ seq_puts(seq, "MPLS_RND ");
if (pkt_dev->flags & F_QUEUE_MAP_RND)
- seq_printf(seq, "QUEUE_MAP_RND ");
+ seq_puts(seq, "QUEUE_MAP_RND ");
if (pkt_dev->flags & F_QUEUE_MAP_CPU)
- seq_printf(seq, "QUEUE_MAP_CPU ");
+ seq_puts(seq, "QUEUE_MAP_CPU ");
if (pkt_dev->cflows) {
if (pkt_dev->flags & F_FLOW_SEQ)
- seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
+ seq_puts(seq, "FLOW_SEQ "); /*in sequence flows*/
else
- seq_printf(seq, "FLOW_RND ");
+ seq_puts(seq, "FLOW_RND ");
}
#ifdef CONFIG_XFRM
if (pkt_dev->flags & F_IPSEC_ON) {
- seq_printf(seq, "IPSEC ");
+ seq_puts(seq, "IPSEC ");
if (pkt_dev->spi)
seq_printf(seq, "spi:%u", pkt_dev->spi);
}
#endif
if (pkt_dev->flags & F_MACSRC_RND)
- seq_printf(seq, "MACSRC_RND ");
+ seq_puts(seq, "MACSRC_RND ");
if (pkt_dev->flags & F_MACDST_RND)
- seq_printf(seq, "MACDST_RND ");
+ seq_puts(seq, "MACDST_RND ");
if (pkt_dev->flags & F_VID_RND)
- seq_printf(seq, "VID_RND ");
+ seq_puts(seq, "VID_RND ");
if (pkt_dev->flags & F_SVID_RND)
- seq_printf(seq, "SVID_RND ");
+ seq_puts(seq, "SVID_RND ");
if (pkt_dev->flags & F_NODE)
- seq_printf(seq, "NODE_ALLOC ");
+ seq_puts(seq, "NODE_ALLOC ");
seq_puts(seq, "\n");
@@ -716,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->result[0])
seq_printf(seq, "Result: %s\n", pkt_dev->result);
else
- seq_printf(seq, "Result: Idle\n");
+ seq_puts(seq, "Result: Idle\n");
return 0;
}
@@ -1735,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
BUG_ON(!t);
- seq_printf(seq, "Running: ");
+ seq_puts(seq, "Running: ");
if_lock(t);
list_for_each_entry(pkt_dev, &t->if_list, list)
if (pkt_dev->running)
seq_printf(seq, "%s ", pkt_dev->odevname);
- seq_printf(seq, "\nStopped: ");
+ seq_puts(seq, "\nStopped: ");
list_for_each_entry(pkt_dev, &t->if_list, list)
if (!pkt_dev->running)
@@ -1751,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
if (t->result[0])
seq_printf(seq, "\nResult: %s\n", t->result);
else
- seq_printf(seq, "\nResult: NA\n");
+ seq_puts(seq, "\nResult: NA\n");
if_unlock(t);
@@ -3338,9 +3338,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
queue_map = skb_get_queue_mapping(pkt_dev->skb);
txq = netdev_get_tx_queue(odev, queue_map);
- __netif_tx_lock_bh(txq);
+ local_bh_disable();
- if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
+ HARD_TX_LOCK(odev, txq, smp_processor_id());
+
+ if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
ret = NETDEV_TX_BUSY;
pkt_dev->last_ok = 0;
goto unlock;
@@ -3374,7 +3376,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->last_ok = 0;
}
unlock:
- __netif_tx_unlock_bh(txq);
+ HARD_TX_UNLOCK(odev, txq);
+
+ local_bh_enable();
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index eaba0f6..37d8615 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ptp_classify_raw);
void __init ptp_classifier_init(void)
{
- static struct sock_filter ptp_filter[] = {
+ static struct sock_filter ptp_filter[] __initdata = {
{ 0x28, 0, 0, 0x0000000c },
{ 0x15, 0, 12, 0x00000800 },
{ 0x30, 0, 0, 0x00000017 },
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d4ff417..9837beb 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -774,7 +774,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
return 0;
}
-static size_t rtnl_port_size(const struct net_device *dev)
+static size_t rtnl_port_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
size_t port_size = nla_total_size(4) /* PORT_VF */
+ nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
@@ -790,7 +791,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+ !(ext_filter_mask & RTEXT_FILTER_VF))
return 0;
if (dev_num_vf(dev->dev.parent))
return port_self_size + vf_ports_size +
@@ -826,7 +828,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
- + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
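
With rtnl_port_size() taking ext_filter_mask, the potentially large IFLA_VF_PORTS and IFLA_PORT_SELF attributes are sized (and later emitted) only when the requester opted in with RTEXT_FILTER_VF, keeping default link dumps small on hosts with many VFs. A hypothetical userspace request that opts in, laid out in the style iproute2 uses (the struct and field names here are illustrative):

	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
		struct rtattr    ext_req;
		__u32            ext_filter_mask;
	} req = {
		.nlh.nlmsg_len    = sizeof(req),
		.nlh.nlmsg_type   = RTM_GETLINK,
		.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP,
		.ifm.ifi_family   = AF_UNSPEC,
		.ext_req.rta_type = IFLA_EXT_MASK,
		.ext_req.rta_len  = RTA_LENGTH(sizeof(__u32)),
		.ext_filter_mask  = RTEXT_FILTER_VF,
	};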
@@ -888,11 +890,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
+ u32 ext_filter_mask)
{
int err;
- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+ !(ext_filter_mask & RTEXT_FILTER_VF))
return 0;
err = rtnl_port_self_fill(skb, dev);
@@ -1079,7 +1083,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
nla_nest_end(skb, vfinfo);
}
- if (rtnl_port_fill(skb, dev))
+ if (rtnl_port_fill(skb, dev, ext_filter_mask))
goto nla_put_failure;
if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
@@ -1198,6 +1202,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_head *head;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
+ int err;
s_h = cb->args[0];
s_idx = cb->args[1];
@@ -1218,11 +1223,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0,
- NLM_F_MULTI,
- ext_filter_mask) <= 0)
+ err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, 0,
+ NLM_F_MULTI,
+ ext_filter_mask);
+ /* If we ran out of room on the first message,
+ * we're in trouble
+ */
+ WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+
+ if (err <= 0)
goto out;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
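
The WARN_ON() makes the netlink dump contract explicit. In this era rtnl_fill_ifinfo() returns nlmsg_end()'s value, i.e. skb->len, on success, so the loop's cases can be summarized as a sketch:

	/*
	 * rtnl_fill_ifinfo() during a dump (sketch of the contract):
	 *   > 0                      link fitted; keep filling this skb
	 *   -EMSGSIZE, skb->len > 0  skb full; the core resumes the dump
	 *                            later with a fresh skb
	 *   -EMSGSIZE, skb->len == 0 a single link overflows an empty skb,
	 *                            so resuming cannot make progress:
	 *                            hence the WARN_ON
	 */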
@@ -1395,7 +1406,8 @@ static int do_set_master(struct net_device *dev, int ifindex)
return 0;
}
-static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+static int do_setlink(const struct sk_buff *skb,
+ struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -1407,7 +1419,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
err = PTR_ERR(net);
goto errout;
}
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
err = -EPERM;
goto errout;
}
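
ns_capable() asks whether *current* holds a capability, which is the wrong question for a netlink request that may be processed far from its sender; the netlink_*_capable() helpers instead judge the credentials recorded on the request skb. The family, sketched (each takes the request skb first):

	netlink_capable(skb, CAP_NET_ADMIN);                   /* initial user namespace */
	netlink_net_capable(skb, CAP_NET_ADMIN);               /* sender's net ns's user ns */
	netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN);  /* explicit user namespace */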
@@ -1661,7 +1673,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (err < 0)
goto errout;
- err = do_setlink(dev, ifm, tb, ifname, 0);
+ err = do_setlink(skb, dev, ifm, tb, ifname, 0);
errout:
return err;
}
@@ -1778,7 +1790,8 @@ err:
}
EXPORT_SYMBOL(rtnl_create_link);
-static int rtnl_group_changelink(struct net *net, int group,
+static int rtnl_group_changelink(const struct sk_buff *skb,
+ struct net *net, int group,
struct ifinfomsg *ifm,
struct nlattr **tb)
{
@@ -1787,7 +1800,7 @@ static int rtnl_group_changelink(struct net *net, int group,
for_each_netdev(net, dev) {
if (dev->group == group) {
- err = do_setlink(dev, ifm, tb, NULL, 0);
+ err = do_setlink(skb, dev, ifm, tb, NULL, 0);
if (err < 0)
return err;
}
@@ -1929,12 +1942,12 @@ replay:
modified = 1;
}
- return do_setlink(dev, ifm, tb, ifname, modified);
+ return do_setlink(skb, dev, ifm, tb, ifname, modified);
}
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
- return rtnl_group_changelink(net,
+ return rtnl_group_changelink(skb, net,
nla_get_u32(tb[IFLA_GROUP]),
ifm, tb);
return -ENODEV;
@@ -2321,7 +2334,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
int err = -EINVAL;
__u8 *addr;
- if (!capable(CAP_NET_ADMIN))
+ if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
@@ -2773,7 +2786,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
sz_idx = type>>2;
kind = type&3;
- if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
+ if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 30c7d35..3d74530 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -694,7 +694,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif
memcpy(new->cb, old->cb, sizeof(old->cb));
new->csum = old->csum;
- new->local_df = old->local_df;
+ new->ignore_df = old->ignore_df;
new->pkt_type = old->pkt_type;
new->ip_summed = old->ip_summed;
skb_copy_queue_mapping(new, old);
@@ -3458,8 +3458,6 @@ static void sock_rmem_free(struct sk_buff *skb)
*/
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
- int len = skb->len;
-
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)sk->sk_rcvbuf)
return -ENOMEM;
@@ -3474,7 +3472,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, len);
+ sk->sk_data_ready(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3915,7 +3913,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
- skb->local_df = 0;
+ skb->ignore_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
secpath_reset(skb);
@@ -3937,12 +3935,14 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
- unsigned int hdr_len;
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
- hdr_len = tcp_hdrlen(skb);
- else
- hdr_len = sizeof(struct udphdr);
- return hdr_len + shinfo->gso_size;
+ return tcp_hdrlen(skb) + shinfo->gso_size;
+
+ /* UFO sets gso_size to the size of the fragmentation
+ * payload, i.e. the size of the L4 (UDP) header is already
+ * accounted for.
+ */
+ return shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
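
The rewrite fixes a UFO over-count: for UDP fragmentation offload, gso_size already describes the whole fragment payload, L4 header included, so only the TCP path needs tcp_hdrlen() added. Worked numbers (illustrative):

	/*
	 * TCP GSO: gso_size = 1448 (the MSS), tcp_hdrlen() = 20
	 *          seglen = 1448 + 20 = 1468 bytes of transport segment
	 * UFO:     gso_size = 1472 covers the fragment payload already,
	 *          so seglen = 1472; the old "+ sizeof(struct udphdr)"
	 *          overstated it by 8 bytes
	 */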
diff --git a/net/core/sock.c b/net/core/sock.c
index c0fc6bd..664ee42 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -145,6 +145,55 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
+/**
+ * sk_ns_capable - General socket capability test
+ * @sk: Socket to use a capability on or through
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap in
+ * the user namespace @user_ns when the socket was created, and whether
+ * the current process has it as well.
+ */
+bool sk_ns_capable(const struct sock *sk,
+ struct user_namespace *user_ns, int cap)
+{
+ return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
+ ns_capable(user_ns, cap);
+}
+EXPORT_SYMBOL(sk_ns_capable);
+
+/**
+ * sk_capable - Socket global capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The global capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap in
+ * all user namespaces when the socket was created, and whether the
+ * current process has it as well.
+ */
+bool sk_capable(const struct sock *sk, int cap)
+{
+ return sk_ns_capable(sk, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(sk_capable);
+
+/**
+ * sk_net_capable - Network namespace socket capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap over
+ * the network namespace the socket is a member of when the socket was
+ * created, and whether the current process has it as well.
+ */
+bool sk_net_capable(const struct sock *sk, int cap)
+{
+ return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(sk_net_capable);
+
+
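
Taken together, these helpers require that both the socket's opener (via the credentials recorded on the file at open time) and the current task hold the capability, closing paths where a privileged socket is handed to, or driven from, an unprivileged context. A hypothetical call site:

	/* hypothetical: gate a privileged per-socket operation */
	if (!sk_net_capable(sk, CAP_NET_ADMIN))
		return -EPERM;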
#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
@@ -428,7 +477,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
spin_unlock_irqrestore(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb_len);
+ sk->sk_data_ready(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
@@ -2196,7 +2245,7 @@ static void sock_def_error_report(struct sock *sk)
rcu_read_unlock();
}
-static void sock_def_readable(struct sock *sk, int len)
+static void sock_def_readable(struct sock *sk)
{
struct socket_wq *wq;
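
sk_data_ready() drops its length parameter here, and the call sites above stop passing it; the readable amount is better derived from the receive queue at wake-up time than from a stale argument. Any protocol installing its own callback makes the same conversion, sketched with a hypothetical name:

	/* before: void my_data_ready(struct sock *sk, int len); */

	/* after: take what is readable from the queue itself */
	static void my_data_ready(struct sock *sk)
	{
		/* e.g. peek sk->sk_receive_queue if a length is needed */
	}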
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index d7af188..a4216a4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
struct sk_buff *skb, int attrtype)
{
struct sock_fprog_kern *fprog;
@@ -58,7 +58,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
unsigned int flen;
int err = 0;
- if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
+ if (!may_report_filterinfo) {
nla_reserve(skb, attrtype, 0);
return 0;
}
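
Passing a precomputed bool moves the permission decision to the dump's caller, where the requesting socket is in scope, instead of testing the current task deep inside the fill function. A sketch of the caller side (the surrounding diag code is assumed, not shown in this diff):

	/* decided once per dump, against the requester's credentials */
	bool may_report_filterinfo =
		netlink_net_capable(cb->skb, CAP_NET_ADMIN);

	/* then, per socket being dumped: */
	sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
				 PACKET_DIAG_FILTER);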