author    David Howells <dhowells@redhat.com>  2006-11-22 14:55:48 +0000
committer David Howells <dhowells@redhat.com>  2006-11-22 14:55:48 +0000
commit    65f27f38446e1976cc98fd3004b110fedcddd189 (patch)
tree      68f8be93feae31dfa018c22db392a05546b63ee1 /kernel
parent    365970a1ea76d81cb1ad2f652acb605f06dae256 (diff)
WorkStruct: Pass the work_struct pointer instead of context data
Pass the work_struct pointer to the work function rather than context data. The work function can use container_of() to work out the data.

For the cases where the container of the work_struct may go away the moment the pending bit is cleared, it is made possible to defer the release of the structure by deferring the clearing of the pending bit.

To make this work, an extra flag is introduced into the management side of the work_struct. This governs auto-release of the structure upon execution.

Ordinarily, the work queue executor would release the work_struct for further scheduling or deallocation by clearing the pending bit prior to jumping to the work function. This means that, unless the driver makes some guarantee itself that the work_struct won't go away, the work function may not access anything else in the work_struct or its container lest they be deallocated. This is a problem if the auxiliary data is taken away (as done by the last patch).

However, if the pending bit is *not* cleared before jumping to the work function, then the work function *may* access the work_struct and its container with no problems. But then the work function must itself release the work_struct by calling work_release().

In most cases, automatic release is fine, so this is the default. Special initialisers exist for the non-auto-release case (ending in _NAR).

Signed-off-by: David Howells <dhowells@redhat.com>
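As a worked illustration of the convention this patch establishes (a minimal sketch, not code from the patch itself -- struct my_dev, my_dev_work and my_dev_teardown are hypothetical names), a driver embeds the work_struct in its own structure and recovers the container with container_of(); the _NAR variant defers release to the work function itself:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_dev {
	int unit;
	struct work_struct work;	/* embedded; no more void *data */
};

/* Auto-release (default): the pending bit is cleared before this runs,
 * so the container may already be gone unless the driver pins it. */
static void my_dev_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* safe only because this driver keeps dev alive elsewhere */
	printk(KERN_DEBUG "servicing unit %d\n", dev->unit);
}

/* Non-auto-release: initialised with INIT_WORK_NAR(), the pending bit
 * is still set on entry, so the container is safe to touch until we
 * release it ourselves. */
static void my_dev_teardown(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	work_release(work);	/* mandatory in the _NAR case... */
	kfree(dev);		/* ...before the container goes away */
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->work, my_dev_work);
	/* or, for the teardown path:
	 *	INIT_WORK_NAR(&dev->work, my_dev_teardown); */
	schedule_work(&dev->work);
}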
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kmod.c            | 16
-rw-r--r--  kernel/kthread.c         | 13
-rw-r--r--  kernel/power/poweroff.c  |  4
-rw-r--r--  kernel/sys.c             |  4
-rw-r--r--  kernel/workqueue.c       | 19
5 files changed, 30 insertions(+), 26 deletions(-)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bb4e29d..7dc7a9da 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module);
#endif /* CONFIG_KMOD */
struct subprocess_info {
+ struct work_struct work;
struct completion *complete;
char *path;
char **argv;
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data)
}
/* This is run by khelper thread */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
{
- struct subprocess_info *sub_info = data;
+ struct subprocess_info *sub_info =
+ container_of(work, struct subprocess_info, work);
pid_t pid;
int wait = sub_info->wait;
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
{
DECLARE_COMPLETION_ONSTACK(done);
struct subprocess_info sub_info = {
+ .work = __WORK_INITIALIZER(sub_info.work,
+ __call_usermodehelper),
.complete = &done,
.path = path,
.argv = argv,
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
.wait = wait,
.retval = 0,
};
- DECLARE_WORK(work, __call_usermodehelper, &sub_info);
if (!khelper_wq)
return -EBUSY;
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
if (path[0] == '\0')
return 0;
- queue_work(khelper_wq, &work);
+ queue_work(khelper_wq, &sub_info.work);
wait_for_completion(&done);
return sub_info.retval;
}
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
{
DECLARE_COMPLETION(done);
struct subprocess_info sub_info = {
+ .work = __WORK_INITIALIZER(sub_info.work,
+ __call_usermodehelper),
.complete = &done,
.path = path,
.argv = argv,
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
.retval = 0,
};
struct file *f;
- DECLARE_WORK(work, __call_usermodehelper, &sub_info);
if (!khelper_wq)
return -EBUSY;
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
}
sub_info.stdin = f;
- queue_work(khelper_wq, &work);
+ queue_work(khelper_wq, &sub_info.work);
wait_for_completion(&done);
return sub_info.retval;
}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60e..1db8c72 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@ struct kthread_create_info
/* Result passed back to kthread_create() from keventd. */
struct task_struct *result;
struct completion done;
+
+ struct work_struct work;
};
struct kthread_stop_info
@@ -111,9 +113,10 @@ static int kthread(void *_create)
}
/* We are keventd: create a thread. */
-static void keventd_create_kthread(void *_create)
+static void keventd_create_kthread(struct work_struct *work)
{
- struct kthread_create_info *create = _create;
+ struct kthread_create_info *create =
+ container_of(work, struct kthread_create_info, work);
int pid;
/* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
...)
{
struct kthread_create_info create;
- DECLARE_WORK(work, keventd_create_kthread, &create);
create.threadfn = threadfn;
create.data = data;
init_completion(&create.started);
init_completion(&create.done);
+ INIT_WORK(&create.work, keventd_create_kthread);
/*
* The workqueue needs to start up first:
*/
if (!helper_wq)
- work.func(work.data);
+ create.work.func(&create.work);
else {
- queue_work(helper_wq, &work);
+ queue_work(helper_wq, &create.work);
wait_for_completion(&create.done);
}
if (!IS_ERR(create.result)) {
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900a..678ec73 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
* callback we use.
*/
-static void do_poweroff(void *dummy)
+static void do_poweroff(struct work_struct *dummy)
{
kernel_power_off();
}
-static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
+static DECLARE_WORK(poweroff_work, do_poweroff);
static void handle_poweroff(int key, struct tty_struct *tty)
{
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d8..c87b461 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
return 0;
}
-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
{
kernel_restart(NULL);
}
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy)
*/
void ctrl_alt_del(void)
{
- static DECLARE_WORK(cad_work, deferred_cad, NULL);
+ static DECLARE_WORK(cad_work, deferred_cad);
if (C_A_D)
schedule_work(&cad_work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9674797..8d1e7cb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -241,14 +241,14 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;
- void *data = work->data;
list_del_init(cwq->worklist.next);
spin_unlock_irqrestore(&cwq->lock, flags);
BUG_ON(get_wq_data(work) != cwq);
- clear_bit(WORK_STRUCT_PENDING, &work->management);
- f(data);
+ if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+ work_release(work);
+ f(work);
spin_lock_irqsave(&cwq->lock, flags);
cwq->remove_sequence++;
@@ -527,7 +527,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
/**
* schedule_on_each_cpu - call a function on each online CPU from keventd
* @func: the function to call
- * @info: a pointer to pass to func()
*
* Returns zero on success.
* Returns -ve errno on failure.
@@ -536,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
*
* schedule_on_each_cpu() is very slow.
*/
-int schedule_on_each_cpu(work_func_t func, void *info)
+int schedule_on_each_cpu(work_func_t func)
{
int cpu;
struct work_struct *works;
@@ -547,7 +546,7 @@ int schedule_on_each_cpu(work_func_t func, void *info)
mutex_lock(&workqueue_mutex);
for_each_online_cpu(cpu) {
- INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+ INIT_WORK(per_cpu_ptr(works, cpu), func);
__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
per_cpu_ptr(works, cpu));
}
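For context, a hedged sketch of how a caller adapts to the new single-argument schedule_on_each_cpu() -- poke_cpu and poke_count are made-up names, not part of this patch. With the info pointer gone, per-invocation data must be reachable some other way, here via per-CPU state:

#include <linux/workqueue.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, poke_count);

static void poke_cpu(struct work_struct *unused)
{
	/* runs on each online CPU from keventd */
	__get_cpu_var(poke_count)++;
}

static int poke_all_cpus(void)
{
	return schedule_on_each_cpu(poke_cpu);	/* no info argument now */
}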
@@ -591,7 +590,6 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
/**
* execute_in_process_context - reliably execute the routine with user context
* @fn: the function to execute
- * @data: data to pass to the function
* @ew: guaranteed storage for the execute work structure (must
* be available when the work executes)
*
@@ -601,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
* Returns: 0 - function was executed
* 1 - function was scheduled for execution
*/
-int execute_in_process_context(work_func_t fn, void *data,
- struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
if (!in_interrupt()) {
- fn(data);
+ fn(&ew->work);
return 0;
}
- INIT_WORK(&ew->work, fn, data);
+ INIT_WORK(&ew->work, fn);
schedule_work(&ew->work);
return 1;
OpenPOWER on IntegriCloud