Diffstat (limited to 'include')
-rw-r--r--  include/linux/dcache.h           | 20
-rw-r--r--  include/linux/lockref.h          | 71
-rw-r--r--  include/linux/nsproxy.h          |  6
-rw-r--r--  include/linux/regmap.h           |  1
-rw-r--r--  include/linux/wait.h             | 57
-rw-r--r--  include/net/busy_poll.h          |  1
-rw-r--r--  include/net/genetlink.h          | 20
-rw-r--r--  include/net/route.h              |  8
-rw-r--r--  include/net/xfrm.h               |  6
-rw-r--r--  include/uapi/linux/cm4000_cs.h   |  1
-rw-r--r--  include/uapi/linux/icmpv6.h      |  2
11 files changed, 180 insertions(+), 13 deletions(-)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c..efdc944 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
+#include <linux/lockref.h>
struct nameidata;
struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
# endif
#endif
+#define d_lock d_lockref.lock
+
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
/* Ref lookup also touches following */
- unsigned int d_count; /* protected by d_lock */
- spinlock_t d_lock; /* per dentry lock */
+ struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
assert_spin_locked(&dentry->d_lock);
if (!read_seqcount_retry(&dentry->d_seq, seq)) {
ret = 1;
- dentry->d_count++;
+ dentry->d_lockref.count++;
}
return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
static inline unsigned d_count(const struct dentry *dentry)
{
- return dentry->d_count;
+ return dentry->d_lockref.count;
}
/* validate "insecure" dentry pointer */
@@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
* helper function for dentry_operations.d_dname() members
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
if (dentry)
- dentry->d_count++;
+ dentry->d_lockref.count++;
return dentry;
}
static inline struct dentry *dget(struct dentry *dentry)
{
- if (dentry) {
- spin_lock(&dentry->d_lock);
- dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (dentry)
+ lockref_get(&dentry->d_lockref);
return dentry;
}
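With the d_lock alias above, existing spin_lock(&dentry->d_lock) callers keep compiling unchanged while the reference count now sits next to the lock inside d_lockref. A minimal sketch of what the locked fast path amounts to after this change (the function name is illustrative, not part of the patch):

/* Illustrative only: take a reference while already holding d_lock. */
static struct dentry *example_grab_locked(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);     /* really &dentry->d_lockref.lock */
        dentry->d_lockref.count++;      /* what dget_dlock() now does */
        spin_unlock(&dentry->d_lock);
        return dentry;
}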
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 0000000..01233e0
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+ spinlock_t lock;
+ unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ lockref->count++;
+ spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count is 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+ int retval = 0;
+
+ spin_lock(&lockref->lock);
+ if (lockref->count) {
+ lockref->count++;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ if (lockref->count <= 1)
+ return 0;
+ lockref->count--;
+ spin_unlock(&lockref->lock);
+ return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
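A minimal usage sketch for the new API, built around a hypothetical refcounted object (all names below are illustrative, not part of the patch):

#include <linux/lockref.h>
#include <linux/slab.h>

struct myobj {
        struct lockref ref;             /* lock and count kept together */
        /* ... payload ... */
};

static struct myobj *myobj_tryget(struct myobj *obj)
{
        /* Fails once the count has already dropped to zero. */
        return lockref_get_not_zero(&obj->ref) ? obj : NULL;
}

static void myobj_put(struct myobj *obj)
{
        /* Fast path: drop the count while at least one reference remains. */
        if (lockref_put_or_lock(&obj->ref))
                return;
        /* Count was <= 1 and ref.lock is now held: last reference, tear down. */
        spin_unlock(&obj->ref.lock);
        kfree(obj);
}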
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 10e5947..b4ec59d 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -14,6 +14,10 @@ struct fs_struct;
* A structure to contain pointers to all per-process
* namespaces - fs (mount), uts, network, sysvipc, etc.
*
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns. The pid namespace here is the
+ * namespace that children will use.
+ *
* 'count' is the number of tasks holding a reference.
* The count for each namespace, then, will be the number
* of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
- struct pid_namespace *pid_ns;
+ struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
};
extern struct nsproxy init_nsproxy;
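The rename makes explicit that this field is the namespace future children will be created in, which is not necessarily the task's own. A hedged sketch of the distinction, assuming the usual pid_namespace helpers (hypothetical function name):

/* Illustrative only: true after unshare(CLONE_NEWPID) but before the
 * next fork(), while the task itself still lives in its old pid namespace. */
static bool example_child_pid_ns_differs(void)
{
        return task_active_pid_ns(current) !=
               current->nsproxy->pid_ns_for_children;
}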
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 580a532..6d91fcb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
+#include <linux/bug.h>
struct module;
struct device;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a47..a67fc16 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
__ret; \
})
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ ret = schedule_timeout(ret); \
+ spin_lock_irq(&lock); \
+ if (!ret) \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies if the
+ * @condition evaluated to true before the @timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ int __ret = timeout; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, __ret); \
+ __ret; \
+})
+
/*
* These are the old interfaces to sleep waiting for an event.
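A minimal caller sketch for the new macro, assuming a driver with a device lock, a wait queue and a data-ready flag (every name below is illustrative):

struct example_dev {
        spinlock_t lock;
        wait_queue_head_t wq;
        bool data_ready;
};

/* Wait up to one second for data, with dev->lock held across the check. */
static int example_wait_for_data(struct example_dev *dev)
{
        int ret;

        spin_lock_irq(&dev->lock);
        ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
                                                        dev->data_ready,
                                                        dev->lock, HZ);
        spin_unlock_irq(&dev->lock);

        if (ret == 0)
                return -ETIMEDOUT;      /* timeout elapsed */
        if (ret < 0)
                return ret;             /* -ERESTARTSYS: signal arrived */
        return 0;                       /* condition became true in time */
}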
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 8a358a2..829627d 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
/* local bh are disabled so it is ok to use _BH */
NET_ADD_STATS_BH(sock_net(sk),
LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+ cpu_relax();
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
!need_resched() && !busy_loop_timeout(end_time));
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93024a4..8e0b6c8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -61,6 +61,7 @@ struct genl_family {
struct list_head ops_list; /* private */
struct list_head family_list; /* private */
struct list_head mcast_groups; /* private */
+ struct module *module;
};
/**
@@ -121,9 +122,24 @@ struct genl_ops {
struct list_head ops_list;
};
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+ family->module = THIS_MODULE;
+ return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+ struct genl_ops *ops, size_t n_ops)
+{
+ family->module = THIS_MODULE;
+ return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
extern int genl_unregister_family(struct genl_family *family);
extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
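The inline wrappers above exist so THIS_MODULE is evaluated in the caller's context and recorded in the family. A minimal registration sketch (family name and values are illustrative):

#include <net/genetlink.h>

static struct genl_family example_family = {
        .id      = GENL_ID_GENERATE,
        .name    = "EXAMPLE",
        .version = 1,
        .maxattr = 0,
};

static int __init example_init(void)
{
        /* The wrapper records THIS_MODULE in example_family.module. */
        return genl_register_family(&example_family);
}

static void __exit example_exit(void)
{
        genl_unregister_family(&example_family);
}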
diff --git a/include/net/route.h b/include/net/route.h
index 2ea40c1..afdeeb5 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
return hoplimit;
}
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+ struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+ return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+ skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
#endif /* _ROUTE_H */
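For reference, a caller deciding whether a packet needs fragmentation might compare against this helper; sockets using IP_PMTUDISC_PROBE get the device MTU rather than the route MTU (illustrative sketch, not from the patch):

#include <net/route.h>

static bool example_needs_frag(struct sk_buff *skb)
{
        return skb->len > ip_skb_dst_mtu(skb);
}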
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 89d3d8a..e253bf0 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
struct sk_buff *skb);
int (*transport_finish)(struct sk_buff *skb,
int async);
+ void (*local_error)(struct sk_buff *skb, u32 mtu);
};
extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
extern int xfrm_output_resume(struct sk_buff *skb, int err);
extern int xfrm_output(struct sk_buff *skb);
extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
extern int xfrm4_extract_header(struct sk_buff *skb);
extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
extern int xfrm6_extract_header(struct sk_buff *skb);
extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
extern int xfrm6_output_finish(struct sk_buff *skb);
extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
#ifdef CONFIG_XFRM
extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h
index bc51f77..1217f75 100644
--- a/include/uapi/linux/cm4000_cs.h
+++ b/include/uapi/linux/cm4000_cs.h
@@ -2,6 +2,7 @@
#define _UAPI_CM4000_H_
#include <linux/types.h>
+#include <linux/ioctl.h>
#define MAX_ATR 33
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index e0133c7..590beda 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -115,6 +115,8 @@ struct icmp6hdr {
#define ICMPV6_NOT_NEIGHBOUR 2
#define ICMPV6_ADDR_UNREACH 3
#define ICMPV6_PORT_UNREACH 4
+#define ICMPV6_POLICY_FAIL 5
+#define ICMPV6_REJECT_ROUTE 6
/*
* Codes for Time Exceeded