author		Ingo Molnar <mingo@elte.hu>	2009-03-04 11:14:47 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-03-04 11:14:47 +0100
commit		a1be621dfacbef0fd374d8acd553d71e07bf29ac (patch)
tree		32b0355454b43b1098b5e01dd699fd7281abf781 /include
parent		3612fdf780e28b10323dd12d2b0f306c7ada4d4c (diff)
parent		fec6c6fec3e20637bee5d276fb61dd8b49a3f9cc (diff)
Merge branch 'tracing/ftrace'; commit 'v2.6.29-rc7' into tracing/core
Diffstat (limited to 'include')
-rw-r--r--  include/linux/Kbuild                        |  1
-rw-r--r--  include/linux/dcbnl.h                       |  4
-rw-r--r--  include/linux/io-mapping.h                  |  5
-rw-r--r--  include/linux/netfilter/xt_NFLOG.h          |  2
-rw-r--r--  include/linux/rcuclassic.h                  |  6
-rw-r--r--  include/linux/rcupdate.h                    |  4
-rw-r--r--  include/linux/rcupreempt.h                  | 15
-rw-r--r--  include/linux/rcutree.h                     |  6
-rw-r--r--  include/linux/sched.h                       |  4
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h   |  2
10 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b97cdc5..106c3ba 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -52,6 +52,7 @@ header-y += const.h
header-y += cgroupstats.h
header-y += cramfs_fs.h
header-y += cycx_cfm.h
+header-y += dcbnl.h
header-y += dlmconstants.h
header-y += dlm_device.h
header-y += dlm_netlink.h
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index b0ef274..7d2e100 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -20,10 +20,12 @@
#ifndef __LINUX_DCBNL_H__
#define __LINUX_DCBNL_H__
+#include <linux/types.h>
+
#define DCB_PROTO_VERSION 1
struct dcbmsg {
- unsigned char dcb_family;
+ __u8 dcb_family;
__u8 cmd;
__u16 dcb_pad;
};
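Together with the Kbuild hunk above, this change prepares dcbnl.h for export to userspace, which is why the bare unsigned char becomes __u8 and <linux/types.h> is included explicitly. A minimal userspace sketch of what that buys (hypothetical test program, assuming sanitized kernel headers are installed):

/* Hypothetical userspace check: with the header exported and pulling in
 * <linux/types.h>, struct dcbmsg builds with plain glibc + kernel headers. */
#include <linux/dcbnl.h>
#include <stdio.h>

int main(void)
{
	struct dcbmsg msg = { .dcb_family = 0, .cmd = 0, .dcb_pad = 0 };

	printf("struct dcbmsg: %zu bytes, DCB protocol version %d\n",
	       sizeof(msg), DCB_PROTO_VERSION);
	return 0;
}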
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index cbc2f0c..0adb0f9 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -91,8 +91,11 @@ io_mapping_unmap_atomic(void *vaddr)
static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
+ resource_size_t phys_addr;
+
BUG_ON(offset >= mapping->size);
- resource_size_t phys_addr = mapping->base + offset;
+ phys_addr = mapping->base + offset;
+
return ioremap_wc(phys_addr, PAGE_SIZE);
}
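The only functional content here is hoisting the phys_addr declaration above the BUG_ON() statement, which otherwise trips the kernel's -Wdeclaration-after-statement (C90 mixed declarations) warning; behavior is unchanged. For context, a rough sketch of how a driver might use these write-combining helpers (kernel-context code; the function and variable names below are illustrative, not part of this patch):

#include <linux/io.h>
#include <linux/io-mapping.h>

/* Illustrative only: map a device aperture write-combined and poke one page. */
static void example_poke_aperture(resource_size_t base, unsigned long size)
{
	struct io_mapping *map;
	void *regs;

	map = io_mapping_create_wc(base, size);
	if (!map)
		return;

	regs = io_mapping_map_wc(map, 0);	/* non-atomic mapping at offset 0 */
	if (regs) {
		iowrite32(0, regs);		/* store goes through the WC mapping */
		io_mapping_unmap(regs);
	}

	io_mapping_free(map);
}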
diff --git a/include/linux/netfilter/xt_NFLOG.h b/include/linux/netfilter/xt_NFLOG.h
index cdcd0ed..4b36aeb 100644
--- a/include/linux/netfilter/xt_NFLOG.h
+++ b/include/linux/netfilter/xt_NFLOG.h
@@ -2,7 +2,7 @@
#define _XT_NFLOG_TARGET
#define XT_NFLOG_DEFAULT_GROUP 0x1
-#define XT_NFLOG_DEFAULT_THRESHOLD 1
+#define XT_NFLOG_DEFAULT_THRESHOLD 0
#define XT_NFLOG_MASK 0x0
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index f3f697d..80044a4 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
#define rcu_enter_nohz() do { } while (0)
#define rcu_exit_nohz() do { } while (0)
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+ return num_online_cpus() == 1;
+}
+
#endif /* __LINUX_RCUCLASSIC_H */
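rcu_blocking_is_gp() exists so that synchronize_rcu() can short-circuit when only one CPU is online, since then any blocking caller has already passed through a quiescent state. A rough sketch of that caller (the real body and its rcu_synchronize/wakeme_after_rcu helpers live in kernel/rcupdate.c, not in this diff):

void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;	/* completion + rcu_head helper from kernel/rcupdate.c */

	if (rcu_blocking_is_gp())
		return;			/* one CPU online: blocking is itself a grace period */

	init_completion(&rcu.completion);
	call_rcu(&rcu.head, wakeme_after_rcu);	/* wake us once a real grace period elapses */
	wait_for_completion(&rcu.completion);
}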
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 921340a..528343e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
void (*func)(struct rcu_head *head);
};
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
#elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
/* Internal to kernel */
extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
extern int rcu_needs_cpu(int cpu);
#endif /* __LINUX_RCUPDATE_H */
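rcu_scheduler_active and rcu_scheduler_starting() let the RCU flavors distinguish early boot, before the scheduler is running, from normal operation; the rcupreempt fastpath added below keys off that flag. Roughly, the hook defined in kernel/rcupdate.c (outside this diff) does the following:

int rcu_scheduler_active __read_mostly;

/* Called once from the scheduler init path, while the system is still
 * single-threaded; after this, blocking no longer implies a grace period
 * for preemptible RCU. */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}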
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 3e05c09..74304b4 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
#define rcu_exit_nohz() do { } while (0)
#endif /* CONFIG_NO_HZ */
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch? Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+ return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
#endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d4368b7..a722fb6 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
}
#endif /* CONFIG_NO_HZ */
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+ return num_online_cpus() == 1;
+}
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7702cb1..698ce97 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2303,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif
+extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+
#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index e78afe7..c25068e 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -59,7 +59,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conn *ct = (struct nf_conn *)skb->nfct;
int ret = NF_ACCEPT;
- if (ct) {
+ if (ct && ct != &nf_conntrack_untracked) {
if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
ret = __nf_conntrack_confirm(skb);
nf_ct_deliver_cached_events(ct);