author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2016-04-01 15:29:47 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>         2016-04-04 10:41:08 -0700
commit     09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a
tree       6cdf210c9c0f981cd22544feeba701892ec19464 /fs/ceph/addr.c
parent     c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long*
time ago with the promise that one day it would be possible to implement
the page cache with bigger chunks than PAGE_SIZE.
This promise never materialized, and it is unlikely it ever will.
We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it is a constant source of confusion about whether the
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Switching globally to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.
Let's stop pretending that pages in the page cache are special. They are
not.
The changes are pretty straightforward (a short before/after sketch follows the list):
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
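To make the substitutions concrete, here is a minimal before/after sketch in kernel C. The helper pin_and_index() and its parameters are hypothetical, invented purely for illustration, and assume the usual <linux/mm.h> and <linux/pagemap.h> definitions; it is not code from this patch.

/* Before: page-cache units spelled with the PAGE_CACHE_* aliases. */
static void pin_and_index(struct page *page, loff_t pos, pgoff_t *index)
{
	page_cache_get(page);			/* take a page reference */
	*index = pos >> PAGE_CACHE_SHIFT;	/* byte offset -> page index */
}

/* After: identical behaviour, spelled with the plain PAGE_* names. */
static void pin_and_index(struct page *page, loff_t pos, pgoff_t *index)
{
	get_page(page);				/* same reference-count operation */
	*index = pos >> PAGE_SHIFT;		/* PAGE_CACHE_SHIFT == PAGE_SHIFT */
}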
This patch contains automated changes generated with coccinelle using the
script below. For some reason, coccinelle doesn't patch header files;
I've called spatch on them manually.
The only adjustment after coccinelle is a revert of the changes to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.
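For context, the conversion is safe because the PAGE_CACHE_* names have always been thin aliases of the PAGE_* ones. The definitions in include/linux/pagemap.h look roughly like this (paraphrased from memory, not quoted from this patch):

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

This is why a shift by (PAGE_CACHE_SHIFT - PAGE_SHIFT), i.e. by zero, can simply be dropped, and why the PAGE_CACHE_ALIGN definition itself is left untouched here until it can be removed outright.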
There are a few places in the code that coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation will
also be addressed in a separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/ceph/addr.c')
-rw-r--r-- | fs/ceph/addr.c | 114
1 file changed, 57 insertions(+), 57 deletions(-)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fc5cae2..4801571 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
 	inode = page->mapping->host;
 	ci = ceph_inode(inode);
 
-	if (offset != 0 || length != PAGE_CACHE_SIZE) {
+	if (offset != 0 || length != PAGE_SIZE) {
 		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
 		     inode, page, page->index, offset, length);
 		return;
@@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		&ceph_inode_to_client(inode)->client->osdc;
 	int err = 0;
 	u64 off = page_offset(page);
-	u64 len = PAGE_CACHE_SIZE;
+	u64 len = PAGE_SIZE;
 
 	if (off >= i_size_read(inode)) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		 */
 		if (off == 0)
 			return -EINVAL;
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		ceph_fscache_readpage_cancel(inode, page);
 		goto out;
 	}
-	if (err < PAGE_CACHE_SIZE)
+	if (err < PAGE_SIZE)
 		/* zero fill remainder of page */
-		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		zero_user_segment(page, err, PAGE_SIZE);
 	else
 		flush_dcache_page(page);
 
@@ -278,10 +278,10 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 		if (rc < 0 && rc != -ENOENT)
 			goto unlock;
 
-		if (bytes < (int)PAGE_CACHE_SIZE) {
+		if (bytes < (int)PAGE_SIZE) {
 			/* zero (remainder of) page */
 			int s = bytes < 0 ? 0 : bytes;
-			zero_user_segment(page, s, PAGE_CACHE_SIZE);
+			zero_user_segment(page, s, PAGE_SIZE);
 		}
 		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
 		     page->index);
@@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 		ceph_readpage_to_fscache(inode, page);
 unlock:
 		unlock_page(page);
-		page_cache_release(page);
-		bytes -= PAGE_CACHE_SIZE;
+		put_page(page);
+		bytes -= PAGE_SIZE;
 	}
 	kfree(osd_data->pages);
 }
@@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		if (max && nr_pages == max)
 			break;
 	}
-	len = nr_pages << PAGE_CACHE_SHIFT;
+	len = nr_pages << PAGE_SHIFT;
 	dout("start_read %p nr_pages %d is %lld~%lld\n",
 	     inode, nr_pages, off, len);
 	vino = ceph_vino(inode);
@@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
 					  GFP_KERNEL)) {
 			ceph_fscache_uncache_page(inode, page);
-			page_cache_release(page);
+			put_page(page);
 			dout("start_read %p add_to_page_cache failed %p\n",
 			     inode, page);
 			nr_pages = i;
@@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 	if (rc == 0)
 		goto out;
 
-	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
-		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
+	if (fsc->mount_options->rsize >= PAGE_SIZE)
+		max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
 			>> PAGE_SHIFT;
 
 	dout("readpages %p file %p nr_pages %d max %d\n", inode,
@@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	long writeback_stat;
 	u64 truncate_size;
 	u32 truncate_seq;
-	int err = 0, len = PAGE_CACHE_SIZE;
+	int err = 0, len = PAGE_SIZE;
 
 	dout("writepage %p idx %lu\n", page, page->index);
 
@@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping,
 	}
 	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
 		wsize = fsc->mount_options->wsize;
-	if (wsize < PAGE_CACHE_SIZE)
-		wsize = PAGE_CACHE_SIZE;
-	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
+	if (wsize < PAGE_SIZE)
+		wsize = PAGE_SIZE;
+	max_pages_ever = wsize >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 
@@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 		end = -1;
 		dout(" cyclic, start at %lu\n", start);
 	} else {
-		start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		start = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		should_loop = 0;
@@ -887,7 +887,7 @@ get_more_pages:
 
 			num_ops = 1 + do_sync;
 			strip_unit_end = page->index +
-				((len - 1) >> PAGE_CACHE_SHIFT);
+				((len - 1) >> PAGE_SHIFT);
 
 			BUG_ON(pages);
 			max_pages = calc_pages_for(0, (u64)len);
@@ -901,7 +901,7 @@ get_more_pages:
 
 			len = 0;
 		} else if (page->index !=
-			   (offset + len) >> PAGE_CACHE_SHIFT) {
+			   (offset + len) >> PAGE_SHIFT) {
 			if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
 						CEPH_OSD_MAX_OPS)) {
 				redirty_page_for_writepage(wbc, page);
@@ -929,7 +929,7 @@ get_more_pages:
 
 			pages[locked_pages] = page;
 			locked_pages++;
-			len += PAGE_CACHE_SIZE;
+			len += PAGE_SIZE;
 		}
 
 		/* did we get anything? */
@@ -981,7 +981,7 @@ new_request:
 			BUG_ON(IS_ERR(req));
 		}
 		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
-			     PAGE_CACHE_SIZE - offset);
+			     PAGE_SIZE - offset);
 
 		req->r_callback = writepages_finish;
 		req->r_inode = inode;
@@ -1011,7 +1011,7 @@ new_request:
 			}
 
 			set_page_writeback(pages[i]);
-			len += PAGE_CACHE_SIZE;
+			len += PAGE_SIZE;
 		}
 
 		if (snap_size != -1) {
@@ -1020,7 +1020,7 @@ new_request:
 			/* writepages_finish() clears writeback pages
 			 * according to the data length, so make sure
 			 * data length covers all locked pages */
-			u64 min_len = len + 1 - PAGE_CACHE_SIZE;
+			u64 min_len = len + 1 - PAGE_SIZE;
 			len = min(len, (u64)i_size_read(inode) - offset);
 			len = max(len, min_len);
 		}
@@ -1135,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file,
 {
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	loff_t page_off = pos & PAGE_CACHE_MASK;
-	int pos_in_page = pos & ~PAGE_CACHE_MASK;
+	loff_t page_off = pos & PAGE_MASK;
+	int pos_in_page = pos & ~PAGE_MASK;
 	int end_in_page = pos_in_page + len;
 	loff_t i_size;
 	int r;
@@ -1191,7 +1191,7 @@ retry_locked:
 	}
 
 	/* full page? */
-	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
+	if (pos_in_page == 0 && len == PAGE_SIZE)
 		return 0;
 
 	/* past end of file? */
@@ -1199,12 +1199,12 @@ retry_locked:
 
 	if (page_off >= i_size ||
 	    (pos_in_page == 0 && (pos+len) >= i_size &&
-	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
+	     end_in_page - pos_in_page != PAGE_SIZE)) {
 		dout(" zeroing %p 0 - %d and %d - %d\n",
-		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
+		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
 		zero_user_segments(page,
 				   0, pos_in_page,
-				   end_in_page, PAGE_CACHE_SIZE);
+				   end_in_page, PAGE_SIZE);
 		return 0;
 	}
 
@@ -1228,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = file_inode(file);
 	struct page *page;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	int r;
 
 	do {
@@ -1242,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 
 		r = ceph_update_writeable_page(file, pos, len, page);
 		if (r < 0)
-			page_cache_release(page);
+			put_page(page);
 		else
 			*pagep = page;
 	} while (r == -EAGAIN);
@@ -1259,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
 			  struct page *page, void *fsdata)
 {
 	struct inode *inode = file_inode(file);
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int check_cap = 0;
 
 	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
@@ -1279,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
 
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (check_cap)
 		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1322,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_file_info *fi = vma->vm_file->private_data;
 	struct page *pinned_page = NULL;
-	loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
+	loff_t off = vmf->pgoff << PAGE_SHIFT;
 	int want, got, ret;
 
 	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
-	     inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
+	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
 	else
@@ -1343,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 	}
 	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));
+	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
 	    ci->i_inline_version == CEPH_INLINE_NONE)
@@ -1352,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = -EAGAIN;
 
 	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
+	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
 	if (pinned_page)
-		page_cache_release(pinned_page);
+		put_page(pinned_page);
 	ceph_put_cap_refs(ci, got);
 
 	if (ret != -EAGAIN)
 		return ret;
 
 	/* read inline data */
-	if (off >= PAGE_CACHE_SIZE) {
+	if (off >= PAGE_SIZE) {
 		/* does not support inline data > PAGE_SIZE */
 		ret = VM_FAULT_SIGBUS;
 	} else {
@@ -1378,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 						 CEPH_STAT_CAP_INLINE_DATA, true);
 		if (ret1 < 0 || off >= i_size_read(inode)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			ret = VM_FAULT_SIGBUS;
 			goto out;
 		}
-		if (ret1 < PAGE_CACHE_SIZE)
-			zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
+		if (ret1 < PAGE_SIZE)
+			zero_user_segment(page, ret1, PAGE_SIZE);
 		else
 			flush_dcache_page(page);
 		SetPageUptodate(page);
@@ -1392,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 out:
 	dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ret);
+	     inode, off, (size_t)PAGE_SIZE, ret);
 	return ret;
 }
 
@@ -1430,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 	}
 
-	if (off + PAGE_CACHE_SIZE <= size)
-		len = PAGE_CACHE_SIZE;
+	if (off + PAGE_SIZE <= size)
+		len = PAGE_SIZE;
 	else
-		len = size & ~PAGE_CACHE_MASK;
+		len = size & ~PAGE_MASK;
 
 	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
 	     inode, ceph_vinop(inode), off, len, size);
@@ -1519,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 			return;
 		if (PageUptodate(page)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return;
 		}
 	}
@@ -1534,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 	}
 
 	if (page != locked_page) {
-		if (len < PAGE_CACHE_SIZE)
-			zero_user_segment(page, len, PAGE_CACHE_SIZE);
+		if (len < PAGE_SIZE)
+			zero_user_segment(page, len, PAGE_SIZE);
 		else
 			flush_dcache_page(page);
 
 		SetPageUptodate(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -1578,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 				from_pagecache = true;
 				lock_page(page);
 			} else {
-				page_cache_release(page);
+				put_page(page);
 				page = NULL;
 			}
 		}
@@ -1586,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 
 	if (page) {
 		len = i_size_read(inode);
-		if (len > PAGE_CACHE_SIZE)
-			len = PAGE_CACHE_SIZE;
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
 	} else {
 		page = __page_cache_alloc(GFP_NOFS);
 		if (!page) {
@@ -1670,7 +1670,7 @@ out:
 	if (page && page != locked_page) {
 		if (from_pagecache) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		} else
 			__free_pages(page, 0);
 	}