Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	123
1 file changed, 46 insertions(+), 77 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 38f6268..9fe7216 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -398,7 +398,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
if (unlikely(!req))
return NULL;
- req->ki_flags = 1 << KIF_LOCKED;
+ req->ki_flags = 0;
req->ki_users = 2;
req->ki_key = 0;
req->ki_ctx = ctx;
@@ -547,24 +547,6 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id)
return ioctx;
}
-static int lock_kiocb_action(void *param)
-{
- schedule();
- return 0;
-}
-
-static inline void lock_kiocb(struct kiocb *iocb)
-{
- wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action,
- TASK_UNINTERRUPTIBLE);
-}
-
-static inline void unlock_kiocb(struct kiocb *iocb)
-{
- kiocbClearLocked(iocb);
- wake_up_bit(&iocb->ki_flags, KIF_LOCKED);
-}
-
/*
* use_mm
* Makes the calling kernel thread take on the specified
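The helpers removed above are an instance of the kernel's bit-wait locking idiom. A minimal sketch of that idiom, with invented names (my_obj, MY_LOCK_BIT, my_lock) standing in for the kiocb-specific ones:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#define MY_LOCK_BIT 0			/* plays the role of KIF_LOCKED */

struct my_obj {
	unsigned long flags;
};

/* action callback for wait_on_bit_lock(): sleep until woken */
static int my_lock_action(void *word)
{
	schedule();
	return 0;
}

/* sleep until we atomically win the bit, i.e. take the "lock" */
static inline void my_lock(struct my_obj *obj)
{
	wait_on_bit_lock(&obj->flags, MY_LOCK_BIT, my_lock_action,
			 TASK_UNINTERRUPTIBLE);
}

/* release the bit and wake any sleeper in wait_on_bit_lock() */
static inline void my_unlock(struct my_obj *obj)
{
	clear_bit(MY_LOCK_BIT, &obj->flags);
	smp_mb__after_clear_bit();	/* make the clear visible before waking */
	wake_up_bit(&obj->flags, MY_LOCK_BIT);
}

The patch can drop this per-iocb lock because, as the try_queue_kicked_iocb() hunk below shows, the kick/retry paths now do their testing and queueing under ctx->ctx_lock instead.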
@@ -740,19 +722,9 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
ret = retry(iocb);
current->io_wait = NULL;
- if (-EIOCBRETRY != ret) {
- if (-EIOCBQUEUED != ret) {
- BUG_ON(!list_empty(&iocb->ki_wait.task_list));
- aio_complete(iocb, ret, 0);
- /* must not access the iocb after this */
- }
- } else {
- /*
- * Issue an additional retry to avoid waiting forever if
- * no waits were queued (e.g. in case of a short read).
- */
- if (list_empty(&iocb->ki_wait.task_list))
- kiocbSetKicked(iocb);
+ if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
+ BUG_ON(!list_empty(&iocb->ki_wait.task_list));
+ aio_complete(iocb, ret, 0);
}
out:
spin_lock_irq(&ctx->ctx_lock);
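What the consolidated test encodes, spelled out (a summary inferred from the surrounding retry machinery, not wording from the patch):

/*
 * ki_retry return contract:
 *
 *   -EIOCBQUEUED   the operation took ownership of the iocb; completion
 *                  will come from elsewhere via aio_complete()
 *   -EIOCBRETRY    not done yet; kick_iocb() will put the iocb back on
 *                  the run list once progress is possible
 *   anything else  final status: complete the iocb here, exactly once,
 *                  and never touch it again after aio_complete()
 */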
@@ -805,9 +777,7 @@ static int __aio_run_iocbs(struct kioctx *ctx)
* Hold an extra reference while retrying i/o.
*/
iocb->ki_users++; /* grab extra reference */
- lock_kiocb(iocb);
aio_run_iocb(iocb);
- unlock_kiocb(iocb);
if (__aio_put_req(ctx, iocb)) /* drop extra ref */
put_ioctx(ctx);
}
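The ki_users++ / __aio_put_req() pair around aio_run_iocb() is the usual hold-an-extra-reference-across-a-callback pattern. A self-contained userspace sketch of the same shape; every name here (obj, obj_put, run_one) is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int users;
};

/* drop one reference; returns nonzero if ours was the last */
static int obj_put(struct obj *o)
{
	if (--o->users == 0) {
		free(o);
		return 1;
	}
	return 0;
}

/* stands in for aio_run_iocb(): may drop the submitter's reference */
static void run_one(struct obj *o)
{
	obj_put(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->users = 1;		/* submitter's reference */
	o->users++;		/* grab extra reference, as __aio_run_iocbs() does */
	run_one(o);		/* may complete and drop the other reference */
	if (obj_put(o))		/* drop extra ref; last put frees the object */
		printf("our put freed the object\n");
	return 0;
}

Without the extra reference, run_one() could free the object while the caller still needs it, which is why the hunk keeps the ki_users dance even after dropping the per-iocb lock.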
@@ -898,16 +868,24 @@ static void aio_kick_handler(void *data)
* and if required activate the aio work queue to process
* it
*/
-static void queue_kicked_iocb(struct kiocb *iocb)
+static void try_queue_kicked_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
unsigned long flags;
int run = 0;
- WARN_ON((!list_empty(&iocb->ki_wait.task_list)));
+ /* We're supposed to be the only path putting the iocb back on the run
+ * list. If we find that the iocb is *back* on a wait queue already
+ * then retry has happened before we could queue the iocb. This also
+ * means that the retry could have completed and freed our iocb, no
+ * good. */
+ BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
spin_lock_irqsave(&ctx->ctx_lock, flags);
- run = __queue_kicked_iocb(iocb);
+ /* set this inside the lock so that we can't race with aio_run_iocb()
+ * testing it and putting the iocb on the run list under the lock */
+ if (!kiocbTryKick(iocb))
+ run = __queue_kicked_iocb(iocb);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
if (run)
aio_queue_work(ctx);
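kiocbTryKick() and friends are atomic-bitop wrappers over iocb->ki_flags; doing the test-and-set under ctx_lock is what closes the race described in the comment. A sketch of how such helpers are typically defined, modelled on this era's include/linux/aio.h (illustrative, not verbatim):

#define KIF_KICKED	1

#define kiocbTryKick(iocb)	test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbSetKicked(iocb)	set_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbClearKicked(iocb)	clear_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbIsKicked(iocb)	test_bit(KIF_KICKED, &(iocb)->ki_flags)

This matches the hunk's own comment: aio_run_iocb() tests the bit and puts the iocb on the run list under the same lock, so a kick can no longer be lost between the atomic test and the queueing.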
@@ -930,10 +908,7 @@ void fastcall kick_iocb(struct kiocb *iocb)
return;
}
- /* If its already kicked we shouldn't queue it again */
- if (!kiocbTryKick(iocb)) {
- queue_kicked_iocb(iocb);
- }
+ try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);
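kick_iocb() is what a ki_retry implementation uses to get rescheduled after returning -EIOCBRETRY. A hypothetical sketch of that usage, patterned on this file's own wait-queue wakeup helper; my_retry(), my_try_io(), and the setup that points iocb->ki_wait.func at my_wake_func() are all invented:

#include <linux/aio.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kernel.h>

/* hypothetical retry method: queue ki_wait somewhere, then bow out */
static ssize_t my_retry(struct kiocb *iocb)
{
	ssize_t ret = my_try_io(iocb);	/* invented: queues ki_wait on -EAGAIN */

	if (ret == -EAGAIN)
		return -EIOCBRETRY;	/* the wakeup below reschedules us */
	return ret;
}

/* wait-queue wakeup function: hand the iocb back to the aio core */
static int my_wake_func(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);

	list_del_init(&wait->task_list);
	kick_iocb(iocb);
	return 1;
}

aio_p{read,write} below have no such wakeup hook, which is why they loop in place instead of returning -EIOCBRETRY.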
@@ -1321,8 +1296,11 @@ asmlinkage long sys_io_destroy(aio_context_t ctx)
}
/*
- * Default retry method for aio_read (also used for first time submit)
- * Responsible for updating iocb state as retries progress
+ * aio_p{read,write} are the default ki_retry methods for
+ * IO_CMD_P{READ,WRITE}. They maintain kiocb retry state around potentially
+ * multiple calls to f_op->aio_{read,write}(). They loop around partial
+ * progress instead of returning -EIOCBRETRY because they don't have the
+ * means to call kick_iocb().
*/
static ssize_t aio_pread(struct kiocb *iocb)
{
@@ -1331,25 +1309,25 @@ static ssize_t aio_pread(struct kiocb *iocb)
struct inode *inode = mapping->host;
ssize_t ret = 0;
- ret = file->f_op->aio_read(iocb, iocb->ki_buf,
- iocb->ki_left, iocb->ki_pos);
+ do {
+ ret = file->f_op->aio_read(iocb, iocb->ki_buf,
+ iocb->ki_left, iocb->ki_pos);
+ /*
+ * Can't just depend on iocb->ki_left to determine
+ * whether we are done. This may have been a short read.
+ */
+ if (ret > 0) {
+ iocb->ki_buf += ret;
+ iocb->ki_left -= ret;
+ }
- /*
- * Can't just depend on iocb->ki_left to determine
- * whether we are done. This may have been a short read.
- */
- if (ret > 0) {
- iocb->ki_buf += ret;
- iocb->ki_left -= ret;
/*
- * For pipes and sockets we return once we have
- * some data; for regular files we retry till we
- * complete the entire read or find that we can't
- * read any more data (e.g short reads).
+ * For pipes and sockets we return once we have some data; for
+ * regular files we retry till we complete the entire read or
+ * find that we can't read any more data (e.g. short reads).
*/
- if (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))
- ret = -EIOCBRETRY;
- }
+ } while (ret > 0 && iocb->ki_left > 0 &&
+ !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode));
/* This means we must have transferred all that we could */
/* No need to retry anymore */
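The new do/while in aio_pread() is the classic short-read loop. The same shape as a self-contained userspace analogue over pread(2), with invented names:

#include <unistd.h>
#include <sys/types.h>

/* Read up to `count` bytes at `off`, looping over short reads.
 * Mirrors the loop above: advance the cursor on progress, stop on
 * EOF (0), error (<0), or completion. */
static ssize_t read_full(int fd, char *buf, size_t count, off_t off)
{
	size_t left = count;
	ssize_t ret;

	do {
		ret = pread(fd, buf, left, off);
		if (ret > 0) {		/* partial progress: advance the cursor */
			buf += ret;
			off += ret;
			left -= ret;
		}
	} while (ret > 0 && left > 0);

	if (ret < 0 && left == count)
		return -1;		/* nothing transferred; errno is set */
	return count - left;		/* bytes actually read (0 at EOF) */
}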
@@ -1359,27 +1337,21 @@ static ssize_t aio_pread(struct kiocb *iocb)
return ret;
}
-/*
- * Default retry method for aio_write (also used for first time submit)
- * Responsible for updating iocb state as retries progress
- */
+/* see aio_pread() */
static ssize_t aio_pwrite(struct kiocb *iocb)
{
struct file *file = iocb->ki_filp;
ssize_t ret = 0;
- ret = file->f_op->aio_write(iocb, iocb->ki_buf,
- iocb->ki_left, iocb->ki_pos);
-
- if (ret > 0) {
- iocb->ki_buf += ret;
- iocb->ki_left -= ret;
-
- ret = -EIOCBRETRY;
- }
+ do {
+ ret = file->f_op->aio_write(iocb, iocb->ki_buf,
+ iocb->ki_left, iocb->ki_pos);
+ if (ret > 0) {
+ iocb->ki_buf += ret;
+ iocb->ki_left -= ret;
+ }
+ } while (ret > 0 && iocb->ki_left > 0);
- /* This means we must have transferred all that we could */
- /* No need to retry anymore */
if ((ret == 0) || (iocb->ki_left == 0))
ret = iocb->ki_nbytes - iocb->ki_left;
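The write side is the same loop without the pipe/socket early-out, since writes to regular files are retried until complete. A companion userspace analogue, pairing with the read sketch above:

#include <unistd.h>
#include <sys/types.h>

/* Write `count` bytes at `off`, looping over partial writes. */
static ssize_t write_full(int fd, const char *buf, size_t count, off_t off)
{
	size_t left = count;
	ssize_t ret;

	do {
		ret = pwrite(fd, buf, left, off);
		if (ret > 0) {
			buf += ret;
			off += ret;
			left -= ret;
		}
	} while (ret > 0 && left > 0);

	if (ret < 0 && left == count)
		return -1;		/* nothing transferred; errno is set */
	return count - left;		/* bytes actually written */
}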
@@ -1549,7 +1521,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
spin_lock_irq(&ctx->ctx_lock);
aio_run_iocb(req);
- unlock_kiocb(req);
if (!list_empty(&ctx->run_list)) {
/* drain the run list */
while (__aio_run_iocbs(ctx))
@@ -1681,7 +1652,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
if (NULL != cancel) {
struct io_event tmp;
pr_debug("calling cancel\n");
- lock_kiocb(kiocb);
memset(&tmp, 0, sizeof(tmp));
tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
tmp.data = kiocb->ki_user_data;
@@ -1693,7 +1663,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
if (copy_to_user(result, &tmp, sizeof(tmp)))
ret = -EFAULT;
}
- unlock_kiocb(kiocb);
} else
ret = -EINVAL;
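For context, the path above services io_cancel(2), which glibc does not wrap; userspace reaches it via a raw syscall. A minimal sketch, with my_io_cancel() an invented wrapper name:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Raw io_cancel(2) wrapper: on a successful cancel the kernel copies the
 * cancelled iocb's completion event into *result, as the hunk above shows. */
static int my_io_cancel(aio_context_t ctx, struct iocb *iocb,
			struct io_event *result)
{
	return syscall(SYS_io_cancel, ctx, iocb, result);
}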