Diffstat (limited to 'fs/aio.c')
 fs/aio.c | 47 ++++++++++++++++++++++-------------------------
 1 file changed, 22 insertions(+), 25 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 20bb919..aec2b19 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -29,7 +29,6 @@
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
-#include <linux/rcuref.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -457,6 +456,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
+ assert_spin_locked(&ctx->ctx_lock);
+
if (req->ki_dtor)
req->ki_dtor(req);
kmem_cache_free(kiocb_cachep, req);
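
The change repeated across these hunks is the same: a locking rule that previously lived only in a comment ("must be called with ctx->ctx_lock held") becomes an assert_spin_locked() call, which trips under spinlock debugging if a caller forgets the lock. A minimal sketch of that pattern, using a hypothetical foo_ctx rather than the real kioctx:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical structure standing in for struct kioctx in this sketch. */
struct foo_ctx {
	spinlock_t		lock;
	struct list_head	items;
};

/*
 * Caller must hold ctx->lock.  The assertion documents and enforces that
 * rule; with spinlock debugging enabled it fires immediately if a caller
 * gets it wrong, instead of leaving a silent race.
 */
static void __foo_del_item(struct foo_ctx *ctx, struct list_head *item)
{
	assert_spin_locked(&ctx->lock);
	list_del(item);
}

static void foo_del_item(struct foo_ctx *ctx, struct list_head *item)
{
	spin_lock(&ctx->lock);
	__foo_del_item(ctx, item);
	spin_unlock(&ctx->lock);
}
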
@@ -498,6 +499,8 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
req, atomic_read(&req->ki_filp->f_count));
+ assert_spin_locked(&ctx->ctx_lock);
+
req->ki_users --;
if (unlikely(req->ki_users < 0))
BUG();
@@ -510,7 +513,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
/* Must be done under the lock to serialise against cancellation.
* Call this aio_fput as it duplicates fput via the fput_work.
*/
- if (unlikely(rcuref_dec_and_test(&req->ki_filp->f_count))) {
+ if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
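
The only functional substitution in this hunk is rcuref_dec_and_test() -> atomic_dec_and_test() on f_count; both return true exactly once, for the caller that drops the final reference, so the deferred-fput branch keeps its meaning. A sketch of that contract with a hypothetical bar object (struct file plays this role above):

#include <asm/atomic.h>
#include <linux/slab.h>

/* Hypothetical refcounted object, not the real struct file. */
struct bar {
	atomic_t	refcount;
};

static void bar_put(struct bar *b)
{
	/*
	 * atomic_dec_and_test() returns true only for the thread whose
	 * decrement took the count to zero, so exactly one caller frees.
	 */
	if (atomic_dec_and_test(&b->refcount))
		kfree(b);
}
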
@@ -619,14 +622,13 @@ static void unuse_mm(struct mm_struct *mm)
* the kiocb (to tell the caller to activate the work
* queue to process it), or 0, if it found that it was
* already queued.
- *
- * Should be called with the spin lock iocb->ki_ctx->ctx_lock
- * held
*/
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
+ assert_spin_locked(&ctx->ctx_lock);
+
if (list_empty(&iocb->ki_run_list)) {
list_add_tail(&iocb->ki_run_list,
&ctx->run_list);
@@ -771,13 +773,15 @@ out:
* Process all pending retries queued on the ioctx
* run list.
* Assumes it is operating within the aio issuer's mm
- * context. Expects to be called with ctx->ctx_lock held
+ * context.
*/
static int __aio_run_iocbs(struct kioctx *ctx)
{
struct kiocb *iocb;
LIST_HEAD(run_list);
+ assert_spin_locked(&ctx->ctx_lock);
+
list_splice_init(&ctx->run_list, &run_list);
while (!list_empty(&run_list)) {
iocb = list_entry(run_list.next, struct kiocb,
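
The hunk above keeps the existing drain technique: with ctx_lock held (now asserted), ctx->run_list is spliced onto a local list and the local copy is walked, so retries queued concurrently land on the emptied run_list for the next pass. A generic sketch of that splice-and-drain pattern with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head	node;
	/* ... payload ... */
};

/*
 * Sketch only: here the lock is taken locally; in __aio_run_iocbs the
 * caller already holds ctx->ctx_lock, hence the assert_spin_locked().
 */
static void drain_pending(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(local);
	struct work_item *item, *next;

	spin_lock(lock);
	list_splice_init(pending, &local);	/* leaves 'pending' empty */
	spin_unlock(lock);

	list_for_each_entry_safe(item, next, &local, node) {
		list_del(&item->node);
		/* process item here */
	}
}
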
@@ -937,28 +941,19 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
unsigned long tail;
int ret;
- /* Special case handling for sync iocbs: events go directly
- * into the iocb for fast handling. Note that this will not
- * work if we allow sync kiocbs to be cancelled. in which
- * case the usage count checks will have to move under ctx_lock
- * for all cases.
+ /*
+ * Special case handling for sync iocbs:
+ * - events go directly into the iocb for fast handling
+ * - the sync task with the iocb in its stack holds the single iocb
+ * ref, no other paths have a way to get another ref
+ * - the sync task helpfully left a reference to itself in the iocb
*/
if (is_sync_kiocb(iocb)) {
- int ret;
-
+ BUG_ON(iocb->ki_users != 1);
iocb->ki_user_data = res;
- if (iocb->ki_users == 1) {
- iocb->ki_users = 0;
- ret = 1;
- } else {
- spin_lock_irq(&ctx->ctx_lock);
- iocb->ki_users--;
- ret = (0 == iocb->ki_users);
- spin_unlock_irq(&ctx->ctx_lock);
- }
- /* sync iocbs put the task here for us */
+ iocb->ki_users = 0;
wake_up_process(iocb->ki_obj.tsk);
- return ret;
+ return 1;
}
info = &ctx->ring_info;
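
The simplification above leans on the sync submitter being the only reference holder: aio_complete() stores the result, zeroes ki_users and wakes the task recorded in ki_obj.tsk. The waiting side of that handshake, roughly what the kernel's wait_on_sync_kiocb() did in this era, looks like the following sketch (hypothetical name, not the exact source):

#include <linux/aio.h>
#include <linux/sched.h>

/* Hypothetical helper sketching the submitter side of the handshake. */
static long wait_for_sync_iocb(struct kiocb *iocb)
{
	/* Sleep until aio_complete() drops ki_users to 0 and wakes us. */
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;	/* result stored by aio_complete() */
}
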
@@ -1613,12 +1608,14 @@ asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
/* lookup_kiocb
* Finds a given iocb for cancellation.
- * MUST be called with ctx->ctx_lock held.
*/
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
u32 key)
{
struct list_head *pos;
+
+ assert_spin_locked(&ctx->ctx_lock);
+
/* TODO: use a hash or array, this sucks. */
list_for_each(pos, &ctx->active_reqs) {
struct kiocb *kiocb = list_kiocb(pos);