author     Hugh Dickins <hughd@google.com>                    2011-06-27 16:18:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>     2011-06-27 18:00:12 -0700
commit     d9d90e5eb70e09903dadff42099b6c948f814050 (patch)
tree       c3ab73df6dee61f9403bfd819a6b0cb9f3ca6085
parent     94c1e62df4494b79782cb9c7279f827212d1de70 (diff)
tmpfs: add shmem_read_mapping_page_gfp
Although it is used (by i915) on nothing but tmpfs, read_cache_page_gfp()
is unsuited to tmpfs, because it inserts a page into pagecache before
calling the filesystem's ->readpage: tmpfs may have pages in swapcache
which only it knows how to locate and switch to filecache.
At present tmpfs provides a ->readpage method, and copes with this by
copying pages; but soon we can simplify it by removing its ->readpage.
Provide shmem_read_mapping_page_gfp() now, ready for that transition.
Export shmem_read_mapping_page_gfp() and add it to the list in shmem_fs.h,
with shmem_read_mapping_page() inline for the common mapping_gfp case.
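For illustration only (not part of this patch), a minimal caller sketch for
the common mapping_gfp case; the helper name and the tmpfs-backed struct file
are hypothetical:

	/* Hypothetical caller (needs <linux/shmem_fs.h>, <linux/pagemap.h>,
	 * <linux/err.h>): read one page of a tmpfs-backed file, use it,
	 * then drop the page reference. */
	static int example_read_tmpfs_page(struct file *filp, pgoff_t index)
	{
		struct page *page;

		/* common case: allocate with the mapping's default gfp mask */
		page = shmem_read_mapping_page(filp->f_mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		/* ... page is up to date; access its contents here ... */

		page_cache_release(page);	/* release the reference */
		return 0;
	}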
(shmem_read_mapping_page_gfp or shmem_read_cache_page_gfp? Generally the
read_mapping_page functions use the mapping's ->readpage, and the
read_cache_page functions use the supplied filler, so I think
read_cache_page_gfp was slightly misnamed.)
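For reference, read_mapping_page() in include/linux/pagemap.h is roughly the
following wrapper (paraphrased for illustration, not part of this patch):

	/* read_mapping_page() fills the page with the mapping's own
	 * ->readpage, passed as the filler to read_cache_page(). */
	static inline struct page *read_mapping_page(struct address_space *mapping,
						     pgoff_t index, void *data)
	{
		filler_t *filler = (filler_t *)mapping->a_ops->readpage;
		return read_cache_page(mapping, index, filler, data);
	}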
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   include/linux/shmem_fs.h | 17
-rw-r--r--   mm/shmem.c               | 23
2 files changed, 33 insertions, 7 deletions
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 22a20af..aa08fa8 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -3,15 +3,9 @@
 
 #include <linux/swap.h>
 #include <linux/mempolicy.h>
+#include <linux/pagemap.h>
 #include <linux/percpu_counter.h>
 
-struct page;
-struct file;
-struct inode;
-struct super_block;
-struct user_struct;
-struct vm_area_struct;
-
 /* inode in-kernel data */
 
 #define SHMEM_NR_DIRECT 16
@@ -61,9 +55,18 @@ extern struct file *shmem_file_setup(const char *name,
 					loff_t size, unsigned long flags);
 extern int shmem_zero_setup(struct vm_area_struct *);
 extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+					pgoff_t index, gfp_t gfp_mask);
 extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
 extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
 					struct page **pagep, swp_entry_t *ent);
 
+static inline struct page *shmem_read_mapping_page(
+				struct address_space *mapping, pgoff_t index)
+{
+	return shmem_read_mapping_page_gfp(mapping, index,
+					mapping_gfp_mask(mapping));
+}
+
 #endif
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3035,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
+
+/**
+ * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @gfp:	the page allocator flags to use if allocating
+ *
+ * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
+ * with any new page allocations done using the specified allocation flags.
+ * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * suit tmpfs, since it may have pages in swapcache, and needs to find those
+ * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
+ *
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ */
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+					 pgoff_t index, gfp_t gfp)
+{
+	return read_cache_page_gfp(mapping, index, gfp);
+}
+EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
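A hedged usage sketch of the gfp variant (hypothetical caller and gfp choice,
not taken from this patch): a caller that does not want the mapping's default
mask can pass its own allocation flags, which are used for any newly
allocated page.

	/* Hypothetical caller: read a tmpfs page, with any new page
	 * allocated from a caller-chosen gfp mask (GFP_HIGHUSER here). */
	static struct page *example_read_with_gfp(struct address_space *mapping,
						  pgoff_t index)
	{
		return shmem_read_mapping_page_gfp(mapping, index, GFP_HIGHUSER);
	}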