author		Christoph Hellwig <hch@infradead.org>	2007-10-12 11:12:39 +1000
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 18:12:07 +1100
commit		c40ea74101ab75a8f320d057e7cf4b772b090110 (patch)
tree		047b725cf7347e4111b65edd532cf9b1ee8010d1
parent		0771fb4515229821b7d74865b87a430de9fc1113 (diff)
[XFS] kill superfluous buffer locking
There is no need to lock any page in xfs_buf.c because we operate on our own address_space and all locking is covered by the buffer semaphore. If we ever switch back to the main blockdev address_space, as suggested e.g. for fsblock with a similar scheme, the locking will have to be revised completely anyway, because the current scheme is neither correct nor coherent with itself.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29845a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	53
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.h	1
2 files changed, 5 insertions(+), 49 deletions(-)
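The reasoning in the commit message is that the buffer semaphore already covers every page an xfs_buf maps (the buffer lives in its own address_space, so nothing else reaches those pages through the page cache), which is why the per-page lock_page()/unlock_page() calls and the b_locked bookkeeping can go. What follows is a minimal user-space sketch of that locking model, not XFS code: the toy_buf/toy_page names and the pthread mutex standing in for the buffer semaphore are illustrative assumptions only (compile with cc -pthread).

/*
 * Hypothetical user-space sketch: one buffer-level lock covers all pages
 * of the buffer, so no per-page locking is needed on the I/O path.
 */
#include <pthread.h>
#include <stdio.h>

#define TOY_PAGES 4

struct toy_page {
	char data[64];
};

struct toy_buf {
	pthread_mutex_t  b_sema;               /* stands in for the buffer semaphore */
	struct toy_page *b_pages[TOY_PAGES];   /* stands in for bp->b_pages */
};

/* Equivalent of taking the buffer lock: serialises access to every page. */
static void toy_buf_lock(struct toy_buf *bp)
{
	pthread_mutex_lock(&bp->b_sema);
}

static void toy_buf_unlock(struct toy_buf *bp)
{
	pthread_mutex_unlock(&bp->b_sema);
}

/*
 * I/O on the buffer: the caller already holds the buffer lock, so touching
 * each page directly is safe without any lock_page()/unlock_page() analogue.
 */
static void toy_buf_io(struct toy_buf *bp)
{
	for (int i = 0; i < TOY_PAGES; i++)
		snprintf(bp->b_pages[i]->data, sizeof(bp->b_pages[i]->data),
			 "page %d written under the buffer lock", i);
}

int main(void)
{
	static struct toy_page pages[TOY_PAGES];
	struct toy_buf bp = { .b_sema = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < TOY_PAGES; i++)
		bp.b_pages[i] = &pages[i];

	toy_buf_lock(&bp);      /* one lock ...                  */
	toy_buf_io(&bp);        /* ... covers every page it maps */
	toy_buf_unlock(&bp);

	for (int i = 0; i < TOY_PAGES; i++)
		printf("%s\n", bp.b_pages[i]->data);
	return 0;
}

With that picture in mind, the diff below is mostly mechanical: drop b_locked from the xfs_buf structure, drop the _xfs_buf_iolocked() helper, remove the lock_page()/unlock_page() calls that duplicated what the buffer semaphore already guarantees, and unlock each page right after find_or_create_page() returns it instead of holding it locked across I/O.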
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 522cfaa..a7c7cb2 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -409,6 +409,7 @@ _xfs_buf_lookup_pages(
 			congestion_wait(WRITE, HZ/50);
 			goto retry;
 		}
+		unlock_page(page);
 
 		XFS_STATS_INC(xb_page_found);
@@ -418,10 +419,7 @@ _xfs_buf_lookup_pages(
 		ASSERT(!PagePrivate(page));
 		if (!PageUptodate(page)) {
 			page_count--;
-			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & XBF_READ)
-					bp->b_locked = 1;
-			} else if (!PagePrivate(page)) {
+			if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
 				if (test_page_region(page, offset, nbytes))
 					page_count++;
 			}
@@ -431,11 +429,6 @@ _xfs_buf_lookup_pages(
 		offset = 0;
 	}
 
-	if (!bp->b_locked) {
-		for (i = 0; i < bp->b_page_count; i++)
-			unlock_page(bp->b_pages[i]);
-	}
-
 	if (page_count == bp->b_page_count)
 		bp->b_flags |= XBF_DONE;
@@ -752,7 +745,6 @@ xfs_buf_associate_memory(
 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
 		pageaddr += PAGE_CACHE_SIZE;
 	}
-	bp->b_locked = 0;
 
 	bp->b_count_desired = len;
 	bp->b_buffer_length = buflen;
@@ -1099,25 +1091,13 @@ xfs_buf_iostart(
 	return status;
 }
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-	xfs_buf_t		*bp)
-{
-	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-	if (bp->b_flags & XBF_READ)
-		return bp->b_locked;
-	return 0;
-}
-
 STATIC_INLINE void
 _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-		bp->b_locked = 0;
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
 		xfs_buf_ioend(bp, schedule);
-	}
 }
 
 STATIC void
@@ -1148,10 +1128,6 @@ xfs_buf_bio_end_io(
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
-
-		if (_xfs_buf_iolocked(bp)) {
-			unlock_page(page);
-		}
 	} while (bvec >= bio->bi_io_vec);
 
 	_xfs_buf_ioend(bp, 1);
@@ -1162,13 +1138,12 @@ STATIC void
 _xfs_buf_ioapply(
 	xfs_buf_t		*bp)
 {
-	int			i, rw, map_i, total_nr_pages, nr_pages;
+	int			rw, map_i, total_nr_pages, nr_pages;
 	struct bio		*bio;
 	int			offset = bp->b_offset;
 	int			size = bp->b_count_desired;
 	sector_t		sector = bp->b_bn;
 	unsigned int		blocksize = bp->b_target->bt_bsize;
-	int			locking = _xfs_buf_iolocked(bp);
 
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
@@ -1191,7 +1166,7 @@ _xfs_buf_ioapply(
 	 * filesystem block size is not smaller than the page size.
 	 */
 	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-	    (bp->b_flags & XBF_READ) && locking &&
+	    (bp->b_flags & XBF_READ) &&
 	    (blocksize >= PAGE_CACHE_SIZE)) {
 		bio = bio_alloc(GFP_NOIO, 1);
@@ -1208,24 +1183,6 @@ _xfs_buf_ioapply(
 		goto submit_io;
 	}
 
-	/* Lock down the pages which we need to for the request */
-	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-		for (i = 0; size; i++) {
-			int		nbytes = PAGE_CACHE_SIZE - offset;
-			struct page	*page = bp->b_pages[i];
-
-			if (nbytes > size)
-				nbytes = size;
-
-			lock_page(page);
-
-			size -= nbytes;
-			offset = 0;
-		}
-		offset = bp->b_offset;
-		size = bp->b_count_desired;
-	}
-
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index b5908a3..a3d207d 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -143,7 +143,6 @@ typedef struct xfs_buf {
 	void			*b_fspriv2;
 	void			*b_fspriv3;
 	unsigned short		b_error;	/* error code on I/O */
-	unsigned short		b_locked;	/* page array is locked */
 	unsigned int		b_page_count;	/* size of page array */
 	unsigned int		b_offset;	/* page offset in first page */
 	struct page		**b_pages;	/* array of page pointers */