author    Sachin Prabhu <sprabhu@redhat.com>   2013-09-13 14:11:57 +0100
committer Steve French <smfrench@gmail.com>    2013-09-13 16:24:49 -0500
commit    466bd31bbda9e1dd2ace1d72c8de5045bf6f3bf6 (patch)
tree      a9f0d34082a17ed39a237dfb843441766c550a08 /fs/cifs
parent    a9e9b7bc15a32ec5b0679704e70f3ffeecfaadd8 (diff)
download  op-kernel-dev-466bd31bbda9e1dd2ace1d72c8de5045bf6f3bf6.zip
          op-kernel-dev-466bd31bbda9e1dd2ace1d72c8de5045bf6f3bf6.tar.gz
cifs: Avoid calling unlock_page() twice in cifs_readpage() when using fscache
When reading a single page with cifs_readpage(), we make a call to fscache_read_or_alloc_page(), which, once done, asynchronously calls the completion function cifs_readpage_from_fscache_complete(). This completion function unlocks the page once it has been populated from cache. The module then attempts to unlock the page a second time in cifs_readpage(), which leads to warning messages.

In case of a successful call to fscache_read_or_alloc_page() we should skip the second unlock_page(), since it will be called by cifs_readpage_from_fscache_complete() once the page has been populated by fscache.

With the modifications to cifs_readpage_worker(), we will need to re-grab the page lock in cifs_write_begin().

The problem was first noticed when testing new fscache patches for cifs.
https://bugzilla.redhat.com/show_bug.cgi?id=1005737

Signed-off-by: Sachin Prabhu <sprabhu@redhat.com>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <smfrench@gmail.com>
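For illustration, below is a minimal userspace model of the locking rule the patch enforces; it is a sketch only, not kernel code, and page_model, lock_page_model(), unlock_page_model() and the other names are invented stand-ins. The rule it demonstrates: whichever path completes the read -- the synchronous error path in the worker or the asynchronous fscache completion -- unlocks the page, and the page is unlocked exactly once.

/*
 * Illustrative userspace model of the "unlock exactly once" rule.
 * None of these names are real kernel APIs.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct page_model {
        bool locked;
        int unlocks;
};

static void lock_page_model(struct page_model *p)
{
        assert(!p->locked);
        p->locked = true;
}

static void unlock_page_model(struct page_model *p)
{
        /* Unlocking an already-unlocked page is the bug being fixed. */
        assert(p->locked);
        p->locked = false;
        p->unlocks++;
}

/* Plays the role of cifs_readpage_from_fscache_complete(). */
static void fscache_complete_model(struct page_model *p)
{
        unlock_page_model(p);
}

/* Plays the role of cifs_readpage_worker() after the patch. */
static int readpage_worker_model(struct page_model *p, bool fscache_hit)
{
        if (fscache_hit) {
                /* Success: the completion unlocks; the caller must not. */
                fscache_complete_model(p);
                return 0;
        }
        /* io_error path: unlock here, as the added unlock_page() does. */
        unlock_page_model(p);
        return -1;
}

int main(void)
{
        struct page_model p = { false, 0 };

        lock_page_model(&p);
        readpage_worker_model(&p, true);   /* fscache success path */

        lock_page_model(&p);
        readpage_worker_model(&p, false);  /* error path */

        printf("unlock count: %d (expected 2)\n", p.unlocks);
        return 0;
}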
Diffstat (limited to 'fs/cifs')
-rw-r--r--  fs/cifs/file.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5f99ee5..eb955b5 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3419,6 +3419,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
 
 io_error:
        kunmap(page);
+       unlock_page(page);
 
 read_complete:
        return rc;
@@ -3443,8 +3444,6 @@ static int cifs_readpage(struct file *file, struct page *page)
 
        rc = cifs_readpage_worker(file, page, &offset);
 
-       unlock_page(page);
-
        free_xid(xid);
        return rc;
 }
@@ -3498,6 +3497,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
+       int oncethru = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
        loff_t page_start = pos & PAGE_MASK;
@@ -3507,6 +3507,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
 
        cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
 
+start:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                rc = -ENOMEM;
@@ -3548,13 +3549,16 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
                }
        }
 
-       if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+       if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
                /*
                 * might as well read a page, it is fast enough. If we get
                 * an error, we don't need to return it. cifs_write_end will
                 * do a sync write instead since PG_uptodate isn't set.
                 */
                cifs_readpage_worker(file, page, &page_start);
+               page_cache_release(page);
+               oncethru = 1;
+               goto start;
        } else {
                /* we could try using another file handle if there is one -
                   but how would we lock it to prevent close of that handle
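Because cifs_readpage_worker() (or the fscache completion it triggers) now returns with the page unlocked, cifs_write_begin() can no longer assume it still holds the page lock after the populate step; the last hunk therefore drops the page reference and jumps back to the start label to reacquire a locked page, with oncethru ensuring the read runs at most once. A rough userspace sketch of that retry-once shape, using invented stand-ins (fake_page, grab_page(), release_page(), populate_page()) rather than the real page cache helpers, might look like this:

/*
 * Sketch of the retry-once pattern used in cifs_write_begin() above.
 * All names here are illustrative, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
        int refcount;
        bool locked;
        bool uptodate;
};

/* Stand-in for grab_cache_page_write_begin(): take a ref and lock. */
static struct fake_page *grab_page(struct fake_page *p)
{
        p->refcount++;
        p->locked = true;
        return p;
}

/* Stand-in for page_cache_release(): drop the reference only. */
static void release_page(struct fake_page *p)
{
        p->refcount--;
}

/* Stand-in for cifs_readpage_worker(): returns with the page unlocked. */
static void populate_page(struct fake_page *p)
{
        p->uptodate = true;
        p->locked = false;
}

static void write_begin_model(struct fake_page *p, bool want_read)
{
        int oncethru = 0;
        struct fake_page *page;

start:
        page = grab_page(p);

        if (want_read && !oncethru) {
                populate_page(page);    /* leaves the page unlocked */
                release_page(page);     /* drop the ref ... */
                oncethru = 1;
                goto start;             /* ... and reacquire a locked page */
        }

        printf("refcount=%d locked=%d uptodate=%d\n",
               page->refcount, page->locked, page->uptodate);
}

int main(void)
{
        struct fake_page p = { 0, false, false };
        write_begin_model(&p, true);    /* ends with one ref, locked, uptodate */
        return 0;
}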