author     Tejun Heo <tj@kernel.org>  2014-03-07 10:24:50 -0500
committer  Tejun Heo <tj@kernel.org>  2014-03-07 10:24:50 -0500
commit     059499453a9abd1857d442b44da8b4c126dc72a8 (patch)
tree       dc3760562ee2238aaffbc7cdf84458dfb458e021 /fs/afs
parent     9ca9737444f1a8602f74b85018d881e7e54b5bd1 (diff)
afs: don't use PREPARE_WORK
PREPARE_[DELAYED_]WORK() are being phased out.  They have few users
and a nasty surprise in terms of reentrancy guarantee as workqueue
considers work items to be different if they don't have the same work
function.

afs_call->async_work is multiplexed with multiple work functions.
Introduce afs_async_workfn() which invokes afs_call->async_workfn and
always use it as the work function and update the users to set the
->async_workfn field instead of overriding the work function using
PREPARE_WORK().

It would probably be best to route this with other related updates
through the workqueue tree.

Compile tested.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Cc: linux-afs@lists.infradead.org
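The pattern the patch follows can be summarised outside of AFS as well: keep a
single, fixed work function registered with the workqueue and have it dispatch
through a per-object function pointer, so the workqueue always sees the same
work function no matter which handler runs next. A minimal sketch of that idea
(hypothetical names such as my_obj and my_dispatch_workfn; not part of this
patch):

/* Sketch only, not part of the patch: one fixed work function that
 * dispatches through a per-object pointer, instead of switching the
 * work function with PREPARE_WORK().
 */
#include <linux/workqueue.h>

struct my_obj {
	work_func_t		workfn;	/* handler that should run next */
	struct work_struct	work;	/* always uses my_dispatch_workfn */
};

static void my_dispatch_workfn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, work);

	obj->workfn(work);		/* indirect to the current handler */
}

static void my_obj_init(struct my_obj *obj, work_func_t first_fn)
{
	obj->workfn = first_fn;
	INIT_WORK(&obj->work, my_dispatch_workfn);	/* never changed later */
}

/* To change behaviour later, repoint the handler and requeue:
 *	obj->workfn = some_other_handler;
 *	queue_work(some_wq, &obj->work);
 */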
Diffstat (limited to 'fs/afs')
-rw-r--r--  fs/afs/internal.h |  1 +
-rw-r--r--  fs/afs/rxrpc.c    | 12 ++++++++++--
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6621f80..be75b50 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -75,6 +75,7 @@ struct afs_call {
const struct afs_call_type *type; /* type of call */
const struct afs_wait_mode *wait_mode; /* completion wait mode */
wait_queue_head_t waitq; /* processes awaiting completion */
+ work_func_t async_workfn;
struct work_struct async_work; /* asynchronous work processor */
struct work_struct work; /* actual work processor */
struct sk_buff_head rx_queue; /* received packets */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 8ad8c2a..ef943df 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -644,7 +644,7 @@ static void afs_process_async_call(struct work_struct *work)
/* we can't just delete the call because the work item may be
* queued */
- PREPARE_WORK(&call->async_work, afs_delete_async_call);
+ call->async_workfn = afs_delete_async_call;
queue_work(afs_async_calls, &call->async_work);
}
@@ -663,6 +663,13 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
call->reply_size += len;
}
+static void afs_async_workfn(struct work_struct *work)
+{
+ struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+ call->async_workfn(work);
+}
+
/*
* accept the backlog of incoming calls
*/
@@ -685,7 +692,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
return;
}
- INIT_WORK(&call->async_work, afs_process_async_call);
+ call->async_workfn = afs_process_async_call;
+ INIT_WORK(&call->async_work, afs_async_workfn);
call->wait_mode = &afs_async_incoming_call;
call->type = &afs_RXCMxxxx;
init_waitqueue_head(&call->waitq);