Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        11
-rw-r--r--  lib/hexdump.c             2
-rw-r--r--  lib/kobject.c            97
-rw-r--r--  lib/lockref.c            23
-rw-r--r--  lib/percpu-refcount.c     3
-rw-r--r--  lib/scatterlist.c         3
6 files changed, 125 insertions(+), 14 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 06344d9..ebef88f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -312,6 +312,15 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config MAGIC_SYSRQ_DEFAULT_ENABLE
+ hex "Enable magic SysRq key functions by default"
+ depends on MAGIC_SYSRQ
+ default 0x1
+ help
+ Specifies which SysRq key functions are enabled by default.
+ This may be set to 1 or 0 to enable or disable them all, or
+ to a bitmask as described in Documentation/sysrq.txt.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -983,7 +992,7 @@ config DEBUG_KOBJECT
config DEBUG_KOBJECT_RELEASE
bool "kobject release debugging"
- depends on DEBUG_KERNEL
+ depends on DEBUG_OBJECTS_TIMERS
help
kobjects are reference counted objects. This means that their
last reference count put is not predictable, and the kobject can
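The new MAGIC_SYSRQ_DEFAULT_ENABLE symbol is a bitmask, not a boolean: 0x1 enables every SysRq function, 0x0 disables them all, and any other value enables only the function groups whose bits are set, as listed in Documentation/sysrq.txt. A hedged sketch of how a consumer such as drivers/tty/sysrq.c can pick the symbol up at build time; the variable and helper names are illustrative and may differ from the actual wiring:

static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;

static bool sysrq_on_mask(int mask)
{
	/* 1 means "all functions on"; otherwise test the group bit */
	return sysrq_enabled == 1 || (sysrq_enabled & mask);
}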
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c..8499c81 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
/**
* hex_to_bin - convert a hex digit to its real value
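hex_asc_upper mirrors the existing lowercase hex_asc table for callers that must emit uppercase hex. A minimal user-space sketch of the nibble-indexing pattern both tables serve; hex_byte_pack_upper() here is reimplemented for illustration and merely mirrors the kernel helper of the same name:

#include <stdio.h>

static const char hex_asc_upper[] = "0123456789ABCDEF";

/* Pack one byte as two uppercase hex digits, high nibble first. */
static char *hex_byte_pack_upper(char *buf, unsigned char byte)
{
	*buf++ = hex_asc_upper[(byte >> 4) & 0x0f];
	*buf++ = hex_asc_upper[byte & 0x0f];
	return buf;
}

int main(void)
{
	char out[3] = "";

	hex_byte_pack_upper(out, 0x3a);
	printf("%s\n", out);	/* prints: 3A */
	return 0;
}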
diff --git a/lib/kobject.c b/lib/kobject.c
index 9621751..5b4b888 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -13,11 +13,30 @@
*/
#include <linux/kobject.h>
+#include <linux/kobj_completion.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/stat.h>
#include <linux/slab.h>
+/**
+ * kobject_namespace - return @kobj's namespace tag
+ * @kobj: kobject in question
+ *
+ * Returns namespace tag of @kobj if its parent has namespace ops enabled
+ * and thus @kobj should have a namespace tag associated with it. Returns
+ * %NULL otherwise.
+ */
+const void *kobject_namespace(struct kobject *kobj)
+{
+ const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj);
+
+ if (!ns_ops || ns_ops->type == KOBJ_NS_TYPE_NONE)
+ return NULL;
+
+ return kobj->ktype->namespace(kobj);
+}
+
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
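Note that kobject_namespace() only consults the parent's namespace ops to decide whether a tag applies at all; the tag itself still comes from the ktype's ->namespace callback. A hedged sketch of the callback side this depends on, where struct my_obj and all surrounding names are hypothetical:

#include <linux/kobject.h>

/* Hypothetical object embedding a kobject and carrying a namespace tag. */
struct my_obj {
	struct kobject kobj;
	const void *ns;			/* e.g. a struct net pointer */
};

static const void *my_obj_namespace(struct kobject *kobj)
{
	return container_of(kobj, struct my_obj, kobj)->ns;
}

static struct kobj_type my_ktype = {
	.release   = my_obj_release,	/* assumed defined elsewhere */
	.namespace = my_obj_namespace,
};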
@@ -46,13 +65,21 @@ static int populate_dir(struct kobject *kobj)
static int create_dir(struct kobject *kobj)
{
- int error = 0;
- error = sysfs_create_dir(kobj);
+ int error;
+
+ error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
if (!error) {
error = populate_dir(kobj);
if (error)
sysfs_remove_dir(kobj);
}
+
+ /*
+ * @kobj->sd may be deleted by an ancestor going away. Hold an
+ * extra reference so that it stays until @kobj is gone.
+ */
+ sysfs_get(kobj->sd);
+
return error;
}
@@ -428,7 +455,7 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
goto out;
}
- error = sysfs_rename_dir(kobj, new_name);
+ error = sysfs_rename_dir_ns(kobj, new_name, kobject_namespace(kobj));
if (error)
goto out;
@@ -472,6 +499,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
if (kobj->kset)
new_parent = kobject_get(&kobj->kset->kobj);
}
+
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
@@ -486,7 +514,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
- error = sysfs_move_dir(kobj, new_parent);
+ error = sysfs_move_dir_ns(kobj, new_parent, kobject_namespace(kobj));
if (error)
goto out;
old_parent = kobj->parent;
@@ -508,10 +536,15 @@ out:
*/
void kobject_del(struct kobject *kobj)
{
+ struct sysfs_dirent *sd;
+
if (!kobj)
return;
+ sd = kobj->sd;
sysfs_remove_dir(kobj);
+ sysfs_put(sd);
+
kobj->state_in_sysfs = 0;
kobj_kset_leave(kobj);
kobject_put(kobj->parent);
@@ -592,7 +625,7 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n",
+ pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
schedule_delayed_work(&kobj->release, HZ);
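The class of bug this option exposes is a caller that frees the enclosing structure right after kobject_put() instead of from the ktype's release callback; with the release deferred through a delayed work item, the use-after-free surfaces reliably instead of racing, which also fits the new dependency on DEBUG_OBJECTS_TIMERS above, since a pending timer inside freed memory is exactly what timer object debugging catches. A hypothetical offender:

static void buggy_destroy(struct foo *foo)
{
	kobject_put(&foo->kobj);
	kfree(foo);	/* WRONG: with delayed release, ->kobj is still live */
}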
@@ -727,6 +760,55 @@ const struct sysfs_ops kobj_sysfs_ops = {
};
/**
+ * kobj_completion_init - initialize a kobj_completion object.
+ * @kc: kobj_completion
+ * @ktype: type of kobject to initialize
+ *
+ * kobj_completion structures can be embedded within structures with different
+ * lifetime rules. During the release of the enclosing object, we can
+ * wait on the release of the kobject so that we don't free it while it's
+ * still busy.
+ */
+void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype)
+{
+ init_completion(&kc->kc_unregister);
+ kobject_init(&kc->kc_kobj, ktype);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_init);
+
+/**
+ * kobj_completion_release - release a kobj_completion object
+ * @kobj: kobject embedded in kobj_completion
+ *
+ * Used with kobject_release to notify waiters that the kobject has been
+ * released.
+ */
+void kobj_completion_release(struct kobject *kobj)
+{
+ struct kobj_completion *kc = kobj_to_kobj_completion(kobj);
+ complete(&kc->kc_unregister);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_release);
+
+/**
+ * kobj_completion_del_and_wait - release the kobject and wait for it
+ * @kc: kobj_completion object to release
+ *
+ * Delete the kobject from sysfs and drop the reference count. Then wait
+ * until any other outstanding references are also dropped. This routine
+ * is only necessary once other references may have been taken on the
+ * kobject. Typically this happens when the kobject has been published
+ * to sysfs via kobject_add.
+ */
+void kobj_completion_del_and_wait(struct kobj_completion *kc)
+{
+ kobject_del(&kc->kc_kobj);
+ kobject_put(&kc->kc_kobj);
+ wait_for_completion(&kc->kc_unregister);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait);
+
+/**
* kset_register - initialize and add a kset.
* @k: kset.
*/
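Taken together, the three helpers give an embedded kobject a deterministic teardown point. A hedged usage sketch; struct frob_device, frob_ktype and the surrounding driver names are all hypothetical:

#include <linux/kobj_completion.h>
#include <linux/slab.h>

struct frob_device {
	struct kobj_completion kc;
	/* ... other driver state ... */
};

static void frob_release(struct kobject *kobj)
{
	/* Last reference dropped: unblock kobj_completion_del_and_wait(). */
	kobj_completion_release(kobj);
}

static struct kobj_type frob_ktype = {
	.release = frob_release,
};

static struct frob_device *frob_create(struct kobject *parent, int id)
{
	struct frob_device *frob = kzalloc(sizeof(*frob), GFP_KERNEL);

	if (!frob)
		return NULL;
	kobj_completion_init(&frob->kc, &frob_ktype);
	if (kobject_add(&frob->kc.kc_kobj, parent, "frob%d", id)) {
		/* put drops the last ref; wait for release, then free */
		kobject_put(&frob->kc.kc_kobj);
		wait_for_completion(&frob->kc.kc_unregister);
		kfree(frob);
		return NULL;
	}
	return frob;
}

static void frob_destroy(struct frob_device *frob)
{
	/* Returns only once every outstanding reference is gone... */
	kobj_completion_del_and_wait(&frob->kc);
	/* ...so freeing the enclosing structure is now safe. */
	kfree(frob);
}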
@@ -933,10 +1015,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
bool kobj_ns_current_may_mount(enum kobj_ns_type type)
{
- bool may_mount = false;
-
- if (type == KOBJ_NS_TYPE_NONE)
- return true;
+ bool may_mount = true;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
diff --git a/lib/lockref.c b/lib/lockref.c
index 8ff162f..af6e95d 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
#ifdef CONFIG_CMPXCHG_LOCKREF
/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
*/
@@ -14,12 +30,13 @@
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
struct lockref new = old, prev = old; \
CODE \
- old.lock_count = cmpxchg(&lockref->lock_count, \
- old.lock_count, new.lock_count); \
+ old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
+ old.lock_count, \
+ new.lock_count); \
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
- cpu_relax(); \
+ arch_mutex_cpu_relax(); \
} \
} while (0)
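For context, lockref_get() below shows the shape of the CMPXCHG_LOOP() call sites elsewhere in lib/lockref.c (abridged): the CODE argument mutates a private snapshot, cmpxchg64_relaxed() attempts to publish it, and the spinlock path underneath is the fallback taken once the lock is observed held:

void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}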
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 7deeb62..1a53d49 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -53,6 +53,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
ref->release = release;
return 0;
}
+EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
* percpu_ref_cancel_init - cancel percpu_ref_init()
@@ -84,6 +85,7 @@ void percpu_ref_cancel_init(struct percpu_ref *ref)
free_percpu(ref->pcpu_count);
}
}
+EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
@@ -156,3 +158,4 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
+EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
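The three new exports make the percpu_ref API usable from modules; percpu_ref_get()/put() are static inlines in the header and need no export, and percpu_ref_kill() wraps percpu_ref_kill_and_confirm(). A hedged module-side sketch against the API as it stands in this tree (my_ref and the release callback are hypothetical):

#include <linux/module.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref my_ref;

static void my_ref_release(struct percpu_ref *ref)
{
	pr_info("my_ref: last reference dropped\n");
}

static int __init my_init(void)
{
	int ret;

	ret = percpu_ref_init(&my_ref, my_ref_release);	/* now exported */
	if (ret)
		return ret;

	percpu_ref_get(&my_ref);	/* cheap per-cpu increment */
	percpu_ref_put(&my_ref);
	return 0;
}

static void __exit my_exit(void)
{
	/* Switch to atomic mode and drop the base reference; the release
	 * callback fires once all remaining gets have been put. */
	percpu_ref_kill(&my_ref);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");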
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a685c8a..d16fa29 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed;
- if (miter->__flags & SG_MITER_TO_SG)
+ if ((miter->__flags & SG_MITER_TO_SG) &&
+ !PageSlab(miter->page))
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
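The new PageSlab() test keeps flush_kernel_dcache_page() away from kmalloc'd buffers: slab pages reuse struct page fields such as ->mapping internally, so the page-cache-oriented flush helper must not be handed one, while the flush remains needed for ordinary pages the CPU wrote through a kernel mapping. A hedged sketch of the kind of SG_MITER_TO_SG writer whose sg_miter_stop() reaches this path; sgl and nents are assumed set up by the caller:

#include <linux/scatterlist.h>

static void zero_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter)) {
		memset(miter.addr, 0, miter.length);	/* CPU writes the page */
		miter.consumed = miter.length;
	}
	sg_miter_stop(&miter);	/* flush happens here, slab pages now skipped */
}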