-rw-r--r--   Documentation/RCU/whatisRCU.txt      2
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce.c     6
-rw-r--r--   arch/x86/kernel/traps.c              2
-rw-r--r--   drivers/base/power/opp.c             4
-rw-r--r--   include/linux/fdtable.h              4
-rw-r--r--   include/linux/rcupdate.h            63
-rw-r--r--   kernel/cgroup.c                      4
-rw-r--r--   kernel/pid.c                         5
-rw-r--r--   kernel/rcu/srcu.c                   10
-rw-r--r--   kernel/rcu/tiny.c                    8
-rw-r--r--   kernel/rcu/tree.c                   28
-rw-r--r--   kernel/rcu/tree_plugin.h             8
-rw-r--r--   kernel/rcu/update.c                  4
-rw-r--r--   kernel/sched/core.c                  8
-rw-r--r--   kernel/workqueue.c                  20
-rw-r--r--   security/device_cgroup.c             6
16 files changed, 101 insertions(+), 81 deletions(-)
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 5746b0c..adc2184 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -883,7 +883,7 @@ All: lockdep-checked RCU-protected pointer access
rcu_access_pointer
rcu_dereference_raw
- rcu_lockdep_assert
+ RCU_LOCKDEP_WARN
rcu_sleep_check
RCU_NONIDLE
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index df919ff..3d6b526 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -54,9 +54,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
#define rcu_dereference_check_mce(p) \
({ \
- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
- lockdep_is_held(&mce_chrdev_read_mutex), \
- "suspicious rcu_dereference_check_mce() usage"); \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ !lockdep_is_held(&mce_chrdev_read_mutex), \
+ "suspicious rcu_dereference_check_mce() usage"); \
smp_load_acquire(&(p)); \
})
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index f579192..c5a5231 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -136,7 +136,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
preempt_count_add(HARDIRQ_OFFSET);
/* This code is a bit fragile. Test it. */
- rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
return prev_state;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 677fb28..3b188f2 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -110,8 +110,8 @@ static DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert() \
do { \
- rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&dev_opp_list_lock), \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&dev_opp_list_lock), \
"Missing rcu_read_lock() or " \
"dev_opp_list_lock protection"); \
} while (0)
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index fbb8874..674e3e2 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
{
- rcu_lockdep_assert(rcu_read_lock_held() ||
- lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+ !lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
return __fcheck_files(files, fd);
}
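Note: the wrapper conversions above (mce.c, opp.c, fdtable.h) all apply the same De Morgan inversion. rcu_lockdep_assert() took the condition that must hold (A || B), while RCU_LOCKDEP_WARN() takes the condition that indicates a violation, so the argument becomes !A && !B. A minimal user-space sketch of that equivalence follows; the file name and predicates are hypothetical and nothing here touches lockdep.

    /* demorgan_check.c -- hypothetical stand-ins, not kernel code */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            for (int a = 0; a <= 1; a++) {
                    for (int b = 0; b <= 1; b++) {
                            /* old: rcu_lockdep_assert(A || B, ...) fired when the OR was false */
                            bool assert_fires = !(a || b);
                            /* new: RCU_LOCKDEP_WARN(!A && !B, ...) fires when the AND is true */
                            bool warn_fires = !a && !b;
                            assert(assert_fires == warn_fires);
                    }
            }
            printf("old and new forms fire in exactly the same cases\n");
            return 0;
    }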
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 33ec16b..ff47651 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -536,6 +536,11 @@ static inline int rcu_read_lock_sched_held(void)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */
+static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
+{
+}
+
#ifdef CONFIG_PROVE_RCU
/**
@@ -546,17 +551,32 @@ static inline int rcu_read_lock_sched_held(void)
#define rcu_lockdep_assert(c, s) \
do { \
static bool __section(.data.unlikely) __warned; \
+ deprecate_rcu_lockdep_assert(); \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
+/**
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+ * @c: condition to check
+ * @s: informative message
+ */
+#define RCU_LOCKDEP_WARN(c, s) \
+ do { \
+ static bool __section(.data.unlikely) __warned; \
+ if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ __warned = true; \
+ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
+ } \
+ } while (0)
+
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
- "Illegal context switch in RCU read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+ "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
@@ -567,15 +587,16 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
- "Illegal context switch in RCU-bh read-side critical section"); \
- rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
- "Illegal context switch in RCU-sched read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -606,13 +627,13 @@ static inline void rcu_preempt_sleep_check(void)
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
@@ -836,8 +857,8 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock() used illegally while idle");
}
/*
@@ -887,8 +908,8 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock() used illegally while idle");
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -916,8 +937,8 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
}
/*
@@ -927,8 +948,8 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
@@ -952,8 +973,8 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -970,8 +991,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
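Note: as the rcupdate.h hunks above show, rcu_lockdep_assert(c, s) complained when c was false, whereas RCU_LOCKDEP_WARN(c, s) complains when c is true, and each call site splats at most once thanks to its static __warned flag. A rough user-space sketch of that warn-once behaviour follows; the MOCK_ macro and helpers are hypothetical, fprintf() stands in for lockdep_rcu_suspicious(), and the debug_lockdep_rcu_enabled() gate is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    /* Mimics only the warn-once structure of RCU_LOCKDEP_WARN(). */
    #define MOCK_RCU_LOCKDEP_WARN(c, s) \
            do { \
                    static bool __warned; \
                    if (!__warned && (c)) { \
                            __warned = true; \
                            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, (s)); \
                    } \
            } while (0)

    static bool mock_rcu_is_watching(void)
    {
            return false;   /* pretend the CPU is idle, so the check should trip */
    }

    static void enter_read_side(void)
    {
            MOCK_RCU_LOCKDEP_WARN(!mock_rcu_is_watching(),
                                  "rcu_read_lock() used illegally while idle");
    }

    int main(void)
    {
            enter_read_side();      /* prints the complaint once */
            enter_read_side();      /* silent: __warned is already set for this site */
            return 0;
    }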
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f89d929..b89f316 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -107,8 +107,8 @@ static DEFINE_SPINLOCK(release_agent_path_lock);
struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
#define cgroup_assert_mutex_or_rcu_locked() \
- rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&cgroup_mutex), \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&cgroup_mutex), \
"cgroup_mutex or RCU read lock required");
/*
diff --git a/kernel/pid.c b/kernel/pid.c
index 4fd07d5..ca36879 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -451,9 +451,8 @@ EXPORT_SYMBOL(pid_task);
*/
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
- rcu_lockdep_assert(rcu_read_lock_held(),
- "find_task_by_pid_ns() needs rcu_read_lock()"
- " protection");
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "find_task_by_pid_ns() needs rcu_read_lock() protection");
return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index de35087..d3fcb2e 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -415,11 +415,11 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
struct rcu_head *head = &rcu.head;
bool done = false;
- rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
- !lock_is_held(&rcu_bh_lock_map) &&
- !lock_is_held(&rcu_lock_map) &&
- !lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+ lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
might_sleep();
init_completion(&rcu.completion);
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index c291bd6..d047105 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -191,10 +191,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
*/
void synchronize_sched(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
- !lock_is_held(&rcu_lock_map) &&
- !lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_sched() in RCU read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_sched() in RCU read-side critical section");
cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index cb64d7e..0a73d26 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -649,12 +649,12 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
* It is illegal to enter an extended quiescent state while
* in an RCU read-side critical section.
*/
- rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
- "Illegal idle entry in RCU read-side critical section.");
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
- "Illegal idle entry in RCU-bh read-side critical section.");
- rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
- "Illegal idle entry in RCU-sched read-side critical section.");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+ "Illegal idle entry in RCU read-side critical section.");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
+ "Illegal idle entry in RCU-bh read-side critical section.");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
+ "Illegal idle entry in RCU-sched read-side critical section.");
}
/*
@@ -3161,10 +3161,10 @@ static inline int rcu_blocking_is_gp(void)
*/
void synchronize_sched(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
- !lock_is_held(&rcu_lock_map) &&
- !lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_sched() in RCU-sched read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_sched() in RCU-sched read-side critical section");
if (rcu_blocking_is_gp())
return;
if (rcu_gp_is_expedited())
@@ -3188,10 +3188,10 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
*/
void synchronize_rcu_bh(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
- !lock_is_held(&rcu_lock_map) &&
- !lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
if (rcu_blocking_is_gp())
return;
if (rcu_gp_is_expedited())
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a983bc6..9e922f1 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -538,10 +538,10 @@ EXPORT_SYMBOL_GPL(call_rcu);
*/
void synchronize_rcu(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
- !lock_is_held(&rcu_lock_map) &&
- !lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_rcu() in RCU read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_rcu() in RCU read-side critical section");
if (!rcu_scheduler_active)
return;
if (rcu_gp_is_expedited())
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a0a0dd0..47268fb 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -589,8 +589,8 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
void synchronize_rcu_tasks(void)
{
/* Complain if the scheduler has not started. */
- rcu_lockdep_assert(!rcu_scheduler_active,
- "synchronize_rcu_tasks called too soon");
+ RCU_LOCKDEP_WARN(rcu_scheduler_active,
+ "synchronize_rcu_tasks called too soon");
/* Wait for the grace period. */
wait_rcu_gp(call_rcu_tasks);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78b4bad10..5e73c79 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2200,8 +2200,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
#ifdef CONFIG_SMP
inline struct dl_bw *dl_bw_of(int i)
{
- rcu_lockdep_assert(rcu_read_lock_sched_held(),
- "sched RCU must be held");
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+ "sched RCU must be held");
return &cpu_rq(i)->rd->dl_bw;
}
@@ -2210,8 +2210,8 @@ static inline int dl_bw_cpus(int i)
struct root_domain *rd = cpu_rq(i)->rd;
int cpus = 0;
- rcu_lockdep_assert(rcu_read_lock_sched_held(),
- "sched RCU must be held");
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+ "sched RCU must be held");
for_each_cpu_and(i, rd->span, cpu_active_mask)
cpus++;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4c4f061..cb91c63 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -338,20 +338,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
- lockdep_is_held(&wq_pool_mutex), \
- "sched RCU or wq_pool_mutex should be held")
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ !lockdep_is_held(&wq_pool_mutex), \
+ "sched RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex(wq) \
- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
- lockdep_is_held(&wq->mutex), \
- "sched RCU or wq->mutex should be held")
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ !lockdep_is_held(&wq->mutex), \
+ "sched RCU or wq->mutex should be held")
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
- lockdep_is_held(&wq->mutex) || \
- lockdep_is_held(&wq_pool_mutex), \
- "sched RCU, wq->mutex or wq_pool_mutex should be held")
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ !lockdep_is_held(&wq->mutex) && \
+ !lockdep_is_held(&wq_pool_mutex), \
+ "sched RCU, wq->mutex or wq_pool_mutex should be held")
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 188c1d2..73455089 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -400,9 +400,9 @@ static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
{
bool match = false;
- rcu_lockdep_assert(rcu_read_lock_held() ||
- lockdep_is_held(&devcgroup_mutex),
- "device_cgroup:verify_new_ex called without proper synchronization");
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+ !lockdep_is_held(&devcgroup_mutex),
+ "device_cgroup:verify_new_ex called without proper synchronization");
if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
if (behavior == DEVCG_DEFAULT_ALLOW) {