author | Christoph Hellwig <hch@infradead.org> | 2012-02-29 09:53:48 +0000 |
---|---|---|
committer | Ben Myers <bpm@sgi.com> | 2012-03-05 11:07:42 -0600 |
commit | aa6bf01d391935a8929333bc2e243084ea0c58db (patch) | |
tree | 686c204328f00ae91466267a9f1e85c3c8d767cb /fs/xfs/xfs_aops.c | |
parent | 4b217ed9e30f94b6e8e5e262020ef0ceab6113af (diff) | |
xfs: use per-filesystem I/O completion workqueues
The new concurrency-managed workqueues are cheap enough that we can create per-filesystem workqueues instead of global ones. This allows us to remove the trylock-or-defer scheme on the ilock, which is no longer helpful once we hold outstanding log reservations until the size update has finished.
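For reference, a minimal sketch of what the per-filesystem setup could look like on the mount path. The diff below only touches the completion side in fs/xfs/xfs_aops.c, so the helper name, queue names, flags and error handling here are illustrative assumptions rather than part of this patch; only the m_data_workqueue and m_unwritten_workqueue fields are visible in the diff.

```c
/*
 * Illustrative sketch only: allocate one data and one unwritten-extent
 * conversion workqueue per mount instead of using the old global
 * xfsdatad/xfsconvertd workqueues.  WQ_MEM_RECLAIM is set because I/O
 * completion must be able to make progress under memory pressure, and
 * max_active = 0 keeps the workqueue core's default concurrency.
 */
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
			WQ_MEM_RECLAIM, 0, mp->m_fsname);
	if (!mp->m_data_workqueue)
		return -ENOMEM;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM, 0, mp->m_fsname);
	if (!mp->m_unwritten_workqueue) {
		destroy_workqueue(mp->m_data_workqueue);
		return -ENOMEM;
	}
	return 0;
}
```

The matching teardown would destroy both queues on unmount, once all pending completions have run.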
Also allow the default concurrency on these workqueues so that I/O completions blocking on the ilock for one inode do not block processing for another inode.
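A hedged illustration of that concurrency point (not code from this patch): an ordered workqueue would serialize completion items, while leaving max_active at 0 keeps the default limit so completions for independent inodes can proceed in parallel.

```c
	struct workqueue_struct	*wq;

	/*
	 * Hypothetical contrast, not part of this change: an ordered queue
	 * runs one item at a time, so a completion sleeping in xfs_ilock()
	 * for one inode would stall completions for every other inode
	 * queued behind it.
	 */
	wq = alloc_ordered_workqueue("xfs-conv/%s", WQ_MEM_RECLAIM,
			mp->m_fsname);

	/*
	 * With the default concurrency (max_active = 0) several completion
	 * items can run at once, so blocking on one inode's ilock does not
	 * hold up work queued for other inodes on the same filesystem.
	 */
	wq = alloc_workqueue("xfs-conv/%s", WQ_MEM_RECLAIM, 0,
			mp->m_fsname);
```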
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_aops.c')
-rw-r--r-- | fs/xfs/xfs_aops.c | 39 |
1 files changed, 10 insertions, 29 deletions
```diff
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 74b9baf..540a0174 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -126,21 +126,15 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
 
 /*
  * Update on-disk file size now that data has been written to disk.
- *
- * This function does not block as blocking on the inode lock in IO completion
- * can lead to IO completion order dependency deadlocks.. If it can't get the
- * inode ilock it will return EAGAIN. Callers must handle this.
  */
-STATIC int
+STATIC void
 xfs_setfilesize(
-	xfs_ioend_t		*ioend)
+	struct xfs_ioend	*ioend)
 {
-	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	xfs_fsize_t		isize;
 
-	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
-		return EAGAIN;
-
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	isize = xfs_ioend_new_eof(ioend);
 	if (isize) {
 		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
@@ -149,7 +143,6 @@ xfs_setfilesize(
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return 0;
 }
 
 /*
@@ -163,10 +156,12 @@ xfs_finish_ioend(
 	struct xfs_ioend	*ioend)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
+		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
+
 		if (ioend->io_type == IO_UNWRITTEN)
-			queue_work(xfsconvertd_workqueue, &ioend->io_work);
+			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
 		else if (xfs_ioend_is_append(ioend))
-			queue_work(xfsdatad_workqueue, &ioend->io_work);
+			queue_work(mp->m_data_workqueue, &ioend->io_work);
 		else
 			xfs_destroy_ioend(ioend);
 	}
@@ -207,23 +202,9 @@ xfs_end_io(
 	 * We might have to update the on-disk file size after extending
	 * writes.
 	 */
-	error = xfs_setfilesize(ioend);
-	ASSERT(!error || error == EAGAIN);
-
+	xfs_setfilesize(ioend);
 done:
-	/*
-	 * If we didn't complete processing of the ioend, requeue it to the
-	 * tail of the workqueue for another attempt later. Otherwise destroy
-	 * it.
-	 */
-	if (error == EAGAIN) {
-		atomic_inc(&ioend->io_remaining);
-		xfs_finish_ioend(ioend);
-		/* ensure we don't spin on blocked ioends */
-		delay(1);
-	} else {
-		xfs_destroy_ioend(ioend);
-	}
+	xfs_destroy_ioend(ioend);
 }
 
 /*
```