Diffstat (limited to 'fs')
557 files changed, 13165 insertions, 8145 deletions
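The bulk of the fs/9p changes below replace the old buffer/offset read and write helpers (v9fs_fid_readn, v9fs_file_write_internal) with iov_iter-based calls into p9_client_read()/p9_client_write(). The following is a minimal sketch of that pattern, not part of the commit; it assumes only the p9_client_read() and iov_iter_kvec() signatures visible in the hunks themselves:

/* Illustration: read into a kernel buffer through an iov_iter, the way
 * the converted 9p readers below do. Hypothetical helper, for context only. */
#include <linux/uio.h>
#include <net/9p/client.h>

static ssize_t read_into_buffer(struct p9_fid *fid, void *buf, size_t len,
				loff_t offset)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = len };
	struct iov_iter to;
	int err = 0;
	ssize_t n;

	/* Describe the kernel buffer once; p9_client_read() consumes the
	 * iterator and splits the transfer into msize-sized requests itself,
	 * which is why the old v9fs_fid_readn() retry loop could be dropped. */
	iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, len);

	n = p9_client_read(fid, offset, &to, &err);
	return n ? n : err;
}

The same shape appears with a bio_vec iterator (iov_iter_bvec) for page-based I/O in vfs_addr.c, and with WRITE iterators feeding p9_client_write() in the write paths.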
diff --git a/fs/9p/acl.c b/fs/9p/acl.c index 8482f2d..31c0103 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -247,7 +247,7 @@ static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name, if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) return v9fs_remote_get_acl(dentry, name, buffer, size, type); - acl = v9fs_get_cached_acl(dentry->d_inode, type); + acl = v9fs_get_cached_acl(d_inode(dentry), type); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) @@ -285,7 +285,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name, int retval; struct posix_acl *acl; struct v9fs_session_info *v9ses; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (strcmp(name, "") != 0) return -EINVAL; diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index 099c771..fb9ffcb 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -78,7 +78,6 @@ enum p9_cache_modes { * @cache: cache mode of type &p9_cache_modes * @cachetag: the tag of the cache associated with this session * @fscache: session cookie associated with FS-Cache - * @options: copy of options string given by user * @uname: string user name to mount hierarchy as * @aname: mount specifier for remote hierarchy * @maxdata: maximum data to be sent/recvd per protocol message diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index b83ebfb..5a0db6d 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -68,14 +68,10 @@ int v9fs_file_open(struct inode *inode, struct file *file); void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); int v9fs_uflags2omode(int uflags, int extended); -ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); -ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64); void v9fs_blank_wstat(struct p9_wstat *wstat); int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *); int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, int datasync); -ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *, - const char __user *, size_t, loff_t *, int); int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode); int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode); static inline void v9fs_invalidate_inode_attr(struct inode *inode) diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index eb14e05..e9e0437 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -33,7 +33,7 @@ #include <linux/pagemap.h> #include <linux/idr.h> #include <linux/sched.h> -#include <linux/aio.h> +#include <linux/uio.h> #include <net/9p/9p.h> #include <net/9p/client.h> @@ -51,12 +51,11 @@ */ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) { - int retval; - loff_t offset; - char *buffer; - struct inode *inode; + struct inode *inode = page->mapping->host; + struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; + struct iov_iter to; + int retval, err; - inode = page->mapping->host; p9_debug(P9_DEBUG_VFS, "\n"); BUG_ON(!PageLocked(page)); @@ -65,16 +64,16 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) if (retval == 0) return retval; - buffer = kmap(page); - offset = page_offset(page); + iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE); - retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset); - if (retval < 0) { + retval = p9_client_read(fid, page_offset(page), &to, &err); + if (err) { v9fs_uncache_page(inode, page); + retval = err; goto done; } - memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval); + zero_user(page, retval, PAGE_SIZE - retval); 
flush_dcache_page(page); SetPageUptodate(page); @@ -82,7 +81,6 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) retval = 0; done: - kunmap(page); unlock_page(page); return retval; } @@ -161,41 +159,32 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset, static int v9fs_vfs_writepage_locked(struct page *page) { - char *buffer; - int retval, len; - loff_t offset, size; - mm_segment_t old_fs; - struct v9fs_inode *v9inode; struct inode *inode = page->mapping->host; + struct v9fs_inode *v9inode = V9FS_I(inode); + loff_t size = i_size_read(inode); + struct iov_iter from; + struct bio_vec bvec; + int err, len; - v9inode = V9FS_I(inode); - size = i_size_read(inode); if (page->index == size >> PAGE_CACHE_SHIFT) len = size & ~PAGE_CACHE_MASK; else len = PAGE_CACHE_SIZE; - set_page_writeback(page); - - buffer = kmap(page); - offset = page_offset(page); + bvec.bv_page = page; + bvec.bv_offset = 0; + bvec.bv_len = len; + iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len); - old_fs = get_fs(); - set_fs(get_ds()); /* We should have writeback_fid always set */ BUG_ON(!v9inode->writeback_fid); - retval = v9fs_file_write_internal(inode, - v9inode->writeback_fid, - (__force const char __user *)buffer, - len, &offset, 0); - if (retval > 0) - retval = 0; + set_page_writeback(page); + + p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err); - set_fs(old_fs); - kunmap(page); end_page_writeback(page); - return retval; + return err; } static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc) @@ -241,11 +230,8 @@ static int v9fs_launder_page(struct page *page) /** * v9fs_direct_IO - 9P address space operation for direct I/O - * @rw: direction (read or write) * @iocb: target I/O control block - * @iov: array of vectors that define I/O buffer * @pos: offset in file to begin the operation - * @nr_segs: size of iovec array * * The presence of v9fs_direct_IO() in the address space ops vector * allowes open() O_DIRECT flags which would have failed otherwise. @@ -259,18 +245,23 @@ static int v9fs_launder_page(struct page *page) * */ static ssize_t -v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) +v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) { - /* - * FIXME - * Now that we do caching with cache mode enabled, We need - * to support direct IO - */ - p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%pD) off/no(%lld/%lu) EINVAL\n", - iocb->ki_filp, - (long long)pos, iter->nr_segs); - - return -EINVAL; + struct file *file = iocb->ki_filp; + ssize_t n; + int err = 0; + if (iov_iter_rw(iter) == WRITE) { + n = p9_client_write(file->private_data, pos, iter, &err); + if (n) { + struct inode *inode = file_inode(file); + loff_t i_size = i_size_read(inode); + if (pos + n > i_size) + inode_add_bytes(inode, pos + n - i_size); + } + } else { + n = p9_client_read(file->private_data, pos, iter, &err); + } + return n ? 
n : err; } static int v9fs_write_begin(struct file *filp, struct address_space *mapping, diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index a345b2d..bd456c6 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -53,7 +53,7 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry) dentry, dentry); /* Don't cache negative dentries */ - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) return 1; return 0; } @@ -83,7 +83,7 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - inode = dentry->d_inode; + inode = d_inode(dentry); if (!inode) goto out_valid; diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index 4f11510..5cc00e5 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -33,6 +33,7 @@ #include <linux/inet.h> #include <linux/idr.h> #include <linux/slab.h> +#include <linux/uio.h> #include <net/9p/9p.h> #include <net/9p/client.h> @@ -115,6 +116,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) int buflen; int reclen = 0; struct p9_rdir *rdir; + struct kvec kvec; p9_debug(P9_DEBUG_VFS, "name %pD\n", file); fid = file->private_data; @@ -124,16 +126,23 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) rdir = v9fs_alloc_rdir_buf(file, buflen); if (!rdir) return -ENOMEM; + kvec.iov_base = rdir->buf; + kvec.iov_len = buflen; while (1) { if (rdir->tail == rdir->head) { - err = v9fs_file_readn(file, rdir->buf, NULL, - buflen, ctx->pos); - if (err <= 0) + struct iov_iter to; + int n; + iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen); + n = p9_client_read(file->private_data, ctx->pos, &to, + &err); + if (err) return err; + if (n == 0) + return 0; rdir->head = 0; - rdir->tail = err; + rdir->tail = n; } while (rdir->head < rdir->tail) { p9stat_init(&st); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index b401337..1ef16bd 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -36,6 +36,8 @@ #include <linux/utsname.h> #include <asm/uaccess.h> #include <linux/idr.h> +#include <linux/uio.h> +#include <linux/slab.h> #include <net/9p/9p.h> #include <net/9p/client.h> @@ -149,7 +151,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) { struct p9_flock flock; struct p9_fid *fid; - uint8_t status; + uint8_t status = P9_LOCK_ERROR; int res = 0; unsigned char fl_type; @@ -194,7 +196,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) for (;;) { res = p9_client_lock_dotl(fid, &flock, &status); if (res < 0) - break; + goto out_unlock; if (status != P9_LOCK_BLOCKED) break; @@ -212,14 +214,16 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) case P9_LOCK_BLOCKED: res = -EAGAIN; break; + default: + WARN_ONCE(1, "unknown lock status code: %d\n", status); + /* fallthough */ case P9_LOCK_ERROR: case P9_LOCK_GRACE: res = -ENOLCK; break; - default: - BUG(); } +out_unlock: /* * incase server returned error for lock request, revert * it locally @@ -285,6 +289,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) fl->fl_end = glock.start + glock.length - 1; fl->fl_pid = glock.proc_id; } + kfree(glock.client_id); return res; } @@ -364,63 +369,6 @@ out_err: } /** - * v9fs_fid_readn - read from a fid - * @fid: fid to read - * @data: data buffer to read data into - * @udata: user data buffer to read data into - * @count: size of buffer - * @offset: offset at which to read data - * - */ -ssize_t -v9fs_fid_readn(struct p9_fid *fid, char *data, char __user 
*udata, u32 count, - u64 offset) -{ - int n, total, size; - - p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", - fid->fid, (long long unsigned)offset, count); - n = 0; - total = 0; - size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ; - do { - n = p9_client_read(fid, data, udata, offset, count); - if (n <= 0) - break; - - if (data) - data += n; - if (udata) - udata += n; - - offset += n; - count -= n; - total += n; - } while (count > 0 && n == size); - - if (n < 0) - total = n; - - return total; -} - -/** - * v9fs_file_readn - read from a file - * @filp: file pointer to read - * @data: data buffer to read data into - * @udata: user data buffer to read data into - * @count: size of buffer - * @offset: offset at which to read data - * - */ -ssize_t -v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, - u64 offset) -{ - return v9fs_fid_readn(filp->private_data, data, udata, count, offset); -} - -/** * v9fs_file_read - read from a file * @filp: file pointer to read * @udata: user data buffer to read data into @@ -430,69 +378,22 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, */ static ssize_t -v9fs_file_read(struct file *filp, char __user *udata, size_t count, - loff_t * offset) +v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { - int ret; - struct p9_fid *fid; - size_t size; - - p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset); - fid = filp->private_data; + struct p9_fid *fid = iocb->ki_filp->private_data; + int ret, err; - size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ; - if (count > size) - ret = v9fs_file_readn(filp, NULL, udata, count, *offset); - else - ret = p9_client_read(fid, NULL, udata, *offset, count); + p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", + iov_iter_count(to), iocb->ki_pos); - if (ret > 0) - *offset += ret; + ret = p9_client_read(fid, iocb->ki_pos, to, &err); + if (!ret) + return err; + iocb->ki_pos += ret; return ret; } -ssize_t -v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid, - const char __user *data, size_t count, - loff_t *offset, int invalidate) -{ - int n; - loff_t i_size; - size_t total = 0; - loff_t origin = *offset; - unsigned long pg_start, pg_end; - - p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n", - data, (int)count, (int)*offset); - - do { - n = p9_client_write(fid, NULL, data+total, origin+total, count); - if (n <= 0) - break; - count -= n; - total += n; - } while (count > 0); - - if (invalidate && (total > 0)) { - pg_start = origin >> PAGE_CACHE_SHIFT; - pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT; - if (inode->i_mapping && inode->i_mapping->nrpages) - invalidate_inode_pages2_range(inode->i_mapping, - pg_start, pg_end); - *offset += total; - i_size = i_size_read(inode); - if (*offset > i_size) { - inode_add_bytes(inode, *offset - i_size); - i_size_write(inode, *offset); - } - } - if (n < 0) - return n; - - return total; -} - /** * v9fs_file_write - write to a file * @filp: file pointer to write @@ -502,35 +403,39 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid, * */ static ssize_t -v9fs_file_write(struct file *filp, const char __user * data, - size_t count, loff_t *offset) +v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { - ssize_t retval = 0; - loff_t origin = *offset; - - - retval = generic_write_checks(filp, &origin, &count, 0); - if (retval) - goto out; + struct file *file = iocb->ki_filp; + ssize_t retval; + loff_t origin; + int err = 0; - retval = 
-EINVAL; - if ((ssize_t) count < 0) - goto out; - retval = 0; - if (!count) - goto out; + retval = generic_write_checks(iocb, from); + if (retval <= 0) + return retval; - retval = v9fs_file_write_internal(file_inode(filp), - filp->private_data, - data, count, &origin, 1); - /* update offset on successful write */ - if (retval > 0) - *offset = origin; -out: - return retval; + origin = iocb->ki_pos; + retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err); + if (retval > 0) { + struct inode *inode = file_inode(file); + loff_t i_size; + unsigned long pg_start, pg_end; + pg_start = origin >> PAGE_CACHE_SHIFT; + pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT; + if (inode->i_mapping && inode->i_mapping->nrpages) + invalidate_inode_pages2_range(inode->i_mapping, + pg_start, pg_end); + iocb->ki_pos += retval; + i_size = i_size_read(inode); + if (iocb->ki_pos > i_size) { + inode_add_bytes(inode, iocb->ki_pos - i_size); + i_size_write(inode, iocb->ki_pos); + } + return retval; + } + return err; } - static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { @@ -657,44 +562,6 @@ out_unlock: return VM_FAULT_NOPAGE; } -static ssize_t -v9fs_direct_read(struct file *filp, char __user *udata, size_t count, - loff_t *offsetp) -{ - loff_t size, offset; - struct inode *inode; - struct address_space *mapping; - - offset = *offsetp; - mapping = filp->f_mapping; - inode = mapping->host; - if (!count) - return 0; - size = i_size_read(inode); - if (offset < size) - filemap_write_and_wait_range(mapping, offset, - offset + count - 1); - - return v9fs_file_read(filp, udata, count, offsetp); -} - -/** - * v9fs_cached_file_read - read from a file - * @filp: file pointer to read - * @data: user data buffer to read data into - * @count: size of buffer - * @offset: offset at which to read data - * - */ -static ssize_t -v9fs_cached_file_read(struct file *filp, char __user *data, size_t count, - loff_t *offset) -{ - if (filp->f_flags & O_DIRECT) - return v9fs_direct_read(filp, data, count, offset); - return new_sync_read(filp, data, count, offset); -} - /** * v9fs_mmap_file_read - read from a file * @filp: file pointer to read @@ -704,84 +571,12 @@ v9fs_cached_file_read(struct file *filp, char __user *data, size_t count, * */ static ssize_t -v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count, - loff_t *offset) +v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { /* TODO: Check if there are dirty pages */ - return v9fs_file_read(filp, data, count, offset); -} - -static ssize_t -v9fs_direct_write(struct file *filp, const char __user * data, - size_t count, loff_t *offsetp) -{ - loff_t offset; - ssize_t retval; - struct inode *inode; - struct address_space *mapping; - - offset = *offsetp; - mapping = filp->f_mapping; - inode = mapping->host; - if (!count) - return 0; - - mutex_lock(&inode->i_mutex); - retval = filemap_write_and_wait_range(mapping, offset, - offset + count - 1); - if (retval) - goto err_out; - /* - * After a write we want buffered reads to be sure to go to disk to get - * the new data. We invalidate clean cached page from the region we're - * about to write. 
We do this *before* the write so that if we fail - * here we fall back to buffered write - */ - if (mapping->nrpages) { - pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT; - pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT; - - retval = invalidate_inode_pages2_range(mapping, - pg_start, pg_end); - /* - * If a page can not be invalidated, fall back - * to buffered write. - */ - if (retval) { - if (retval == -EBUSY) - goto buff_write; - goto err_out; - } - } - retval = v9fs_file_write(filp, data, count, offsetp); -err_out: - mutex_unlock(&inode->i_mutex); - return retval; - -buff_write: - mutex_unlock(&inode->i_mutex); - return new_sync_write(filp, data, count, offsetp); -} - -/** - * v9fs_cached_file_write - write to a file - * @filp: file pointer to write - * @data: data buffer to write data from - * @count: size of buffer - * @offset: offset at which to write data - * - */ -static ssize_t -v9fs_cached_file_write(struct file *filp, const char __user * data, - size_t count, loff_t *offset) -{ - - if (filp->f_flags & O_DIRECT) - return v9fs_direct_write(filp, data, count, offset); - return new_sync_write(filp, data, count, offset); + return v9fs_file_read_iter(iocb, to); } - /** * v9fs_mmap_file_write - write to a file * @filp: file pointer to write @@ -791,14 +586,13 @@ v9fs_cached_file_write(struct file *filp, const char __user * data, * */ static ssize_t -v9fs_mmap_file_write(struct file *filp, const char __user *data, - size_t count, loff_t *offset) +v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { /* * TODO: invalidate mmaps on filp's inode between * offset and offset+count */ - return v9fs_file_write(filp, data, count, offset); + return v9fs_file_write_iter(iocb, from); } static void v9fs_mmap_vm_close(struct vm_area_struct *vma) @@ -843,8 +637,6 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = { const struct file_operations v9fs_cached_file_operations = { .llseek = generic_file_llseek, - .read = v9fs_cached_file_read, - .write = v9fs_cached_file_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .open = v9fs_file_open, @@ -856,8 +648,6 @@ const struct file_operations v9fs_cached_file_operations = { const struct file_operations v9fs_cached_file_operations_dotl = { .llseek = generic_file_llseek, - .read = v9fs_cached_file_read, - .write = v9fs_cached_file_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .open = v9fs_file_open, @@ -870,8 +660,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = { const struct file_operations v9fs_file_operations = { .llseek = generic_file_llseek, - .read = v9fs_file_read, - .write = v9fs_file_write, + .read_iter = v9fs_file_read_iter, + .write_iter = v9fs_file_write_iter, .open = v9fs_file_open, .release = v9fs_dir_release, .lock = v9fs_file_lock, @@ -881,8 +671,8 @@ const struct file_operations v9fs_file_operations = { const struct file_operations v9fs_file_operations_dotl = { .llseek = generic_file_llseek, - .read = v9fs_file_read, - .write = v9fs_file_write, + .read_iter = v9fs_file_read_iter, + .write_iter = v9fs_file_write_iter, .open = v9fs_file_open, .release = v9fs_dir_release, .lock = v9fs_file_lock_dotl, @@ -893,8 +683,8 @@ const struct file_operations v9fs_file_operations_dotl = { const struct file_operations v9fs_mmap_file_operations = { .llseek = generic_file_llseek, - .read = v9fs_mmap_file_read, - .write = v9fs_mmap_file_write, + .read_iter = v9fs_mmap_file_read_iter, + .write_iter = v9fs_mmap_file_write_iter, 
.open = v9fs_file_open, .release = v9fs_dir_release, .lock = v9fs_file_lock, @@ -904,8 +694,8 @@ const struct file_operations v9fs_mmap_file_operations = { const struct file_operations v9fs_mmap_file_operations_dotl = { .llseek = generic_file_llseek, - .read = v9fs_mmap_file_read, - .write = v9fs_mmap_file_write, + .read_iter = v9fs_mmap_file_read_iter, + .write_iter = v9fs_mmap_file_write_iter, .open = v9fs_file_open, .release = v9fs_dir_release, .lock = v9fs_file_lock_dotl, diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 3662f1d..703342e 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -595,7 +595,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags) dir, dentry, flags); v9ses = v9fs_inode2v9ses(dir); - inode = dentry->d_inode; + inode = d_inode(dentry); dfid = v9fs_fid_lookup(dentry->d_parent); if (IS_ERR(dfid)) { retval = PTR_ERR(dfid); @@ -864,7 +864,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry, } /* Only creates */ - if (!(flags & O_CREAT) || dentry->d_inode) + if (!(flags & O_CREAT) || d_really_is_positive(dentry)) return finish_no_open(file, res); err = 0; @@ -881,7 +881,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry, } v9fs_invalidate_inode_attr(dir); - v9inode = V9FS_I(dentry->d_inode); + v9inode = V9FS_I(d_inode(dentry)); mutex_lock(&v9inode->v_mutex); if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) && !v9inode->writeback_fid && @@ -908,7 +908,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry, file->private_data = fid; if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) - v9fs_cache_inode_set_cookie(dentry->d_inode, file); + v9fs_cache_inode_set_cookie(d_inode(dentry), file); *opened |= FILE_CREATED; out: @@ -969,8 +969,8 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, p9_debug(P9_DEBUG_VFS, "\n"); retval = 0; - old_inode = old_dentry->d_inode; - new_inode = new_dentry->d_inode; + old_inode = d_inode(old_dentry); + new_inode = d_inode(new_dentry); v9ses = v9fs_inode2v9ses(old_inode); oldfid = v9fs_fid_lookup(old_dentry); if (IS_ERR(oldfid)) @@ -1061,7 +1061,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); v9ses = v9fs_dentry2v9ses(dentry); if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(d_inode(dentry), stat); return 0; } fid = v9fs_fid_lookup(dentry); @@ -1072,8 +1072,8 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, if (IS_ERR(st)) return PTR_ERR(st); - v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); - generic_fillattr(dentry->d_inode, stat); + v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb); + generic_fillattr(d_inode(dentry), stat); p9stat_free(st); kfree(st); @@ -1095,7 +1095,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) struct p9_wstat wstat; p9_debug(P9_DEBUG_VFS, "\n"); - retval = inode_change_ok(dentry->d_inode, iattr); + retval = inode_change_ok(d_inode(dentry), iattr); if (retval) return retval; @@ -1128,20 +1128,20 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) /* Write all dirty data */ if (d_is_reg(dentry)) - filemap_write_and_wait(dentry->d_inode->i_mapping); + filemap_write_and_wait(d_inode(dentry)->i_mapping); retval = p9_client_wstat(fid, &wstat); if (retval < 0) return retval; if ((iattr->ia_valid & ATTR_SIZE) && - iattr->ia_size != 
i_size_read(dentry->d_inode)) - truncate_setsize(dentry->d_inode, iattr->ia_size); + iattr->ia_size != i_size_read(d_inode(dentry))) + truncate_setsize(d_inode(dentry), iattr->ia_size); - v9fs_invalidate_inode_attr(dentry->d_inode); + v9fs_invalidate_inode_attr(d_inode(dentry)); - setattr_copy(dentry->d_inode, iattr); - mark_inode_dirty(dentry->d_inode); + setattr_copy(d_inode(dentry), iattr); + mark_inode_dirty(d_inode(dentry)); return 0; } @@ -1403,7 +1403,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name); __putname(name); if (!retval) { - v9fs_refresh_inode(oldfid, old_dentry->d_inode); + v9fs_refresh_inode(oldfid, d_inode(old_dentry)); v9fs_invalidate_inode_attr(dir); } clunk_fid: diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 6054c16b..9861c7c 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -265,7 +265,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, } /* Only creates */ - if (!(flags & O_CREAT) || dentry->d_inode) + if (!(flags & O_CREAT) || d_really_is_positive(dentry)) return finish_no_open(file, res); v9ses = v9fs_inode2v9ses(dir); @@ -481,7 +481,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); v9ses = v9fs_dentry2v9ses(dentry); if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(d_inode(dentry), stat); return 0; } fid = v9fs_fid_lookup(dentry); @@ -496,8 +496,8 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, if (IS_ERR(st)) return PTR_ERR(st); - v9fs_stat2inode_dotl(st, dentry->d_inode); - generic_fillattr(dentry->d_inode, stat); + v9fs_stat2inode_dotl(st, d_inode(dentry)); + generic_fillattr(d_inode(dentry), stat); /* Change block size to what the server returned */ stat->blksize = st->st_blksize; @@ -557,7 +557,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) int retval; struct p9_fid *fid; struct p9_iattr_dotl p9attr; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); p9_debug(P9_DEBUG_VFS, "\n"); @@ -795,10 +795,10 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, if (IS_ERR(fid)) return PTR_ERR(fid); - v9fs_refresh_inode_dotl(fid, old_dentry->d_inode); + v9fs_refresh_inode_dotl(fid, d_inode(old_dentry)); } - ihold(old_dentry->d_inode); - d_instantiate(dentry, old_dentry->d_inode); + ihold(d_inode(old_dentry)); + d_instantiate(dentry, d_inode(old_dentry)); return err; } diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 0afd038..e99a338 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -168,8 +168,8 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, retval = PTR_ERR(st); goto release_sb; } - root->d_inode->i_ino = v9fs_qid2ino(&st->qid); - v9fs_stat2inode_dotl(st, root->d_inode); + d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); + v9fs_stat2inode_dotl(st, d_inode(root)); kfree(st); } else { struct p9_wstat *st = NULL; @@ -179,8 +179,8 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, goto release_sb; } - root->d_inode->i_ino = v9fs_qid2ino(&st->qid); - v9fs_stat2inode(st, root->d_inode, sb); + d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); + v9fs_stat2inode(st, d_inode(root), sb); p9stat_free(st); kfree(st); diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c index f95e01e..0cf44b6 100644 --- a/fs/9p/xattr.c +++ b/fs/9p/xattr.c @@ -15,6 
+15,7 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/sched.h> +#include <linux/uio.h> #include <net/9p/9p.h> #include <net/9p/client.h> @@ -25,50 +26,34 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name, void *buffer, size_t buffer_size) { ssize_t retval; - int msize, read_count; - u64 offset = 0, attr_size; + u64 attr_size; struct p9_fid *attr_fid; + struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size}; + struct iov_iter to; + int err; + + iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size); attr_fid = p9_client_xattrwalk(fid, name, &attr_size); if (IS_ERR(attr_fid)) { retval = PTR_ERR(attr_fid); p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n", retval); - attr_fid = NULL; - goto error; - } - if (!buffer_size) { - /* request to get the attr_size */ - retval = attr_size; - goto error; + return retval; } if (attr_size > buffer_size) { - retval = -ERANGE; - goto error; - } - msize = attr_fid->clnt->msize; - while (attr_size) { - if (attr_size > (msize - P9_IOHDRSZ)) - read_count = msize - P9_IOHDRSZ; + if (!buffer_size) /* request to get the attr_size */ + retval = attr_size; else - read_count = attr_size; - read_count = p9_client_read(attr_fid, ((char *)buffer)+offset, - NULL, offset, read_count); - if (read_count < 0) { - /* error in xattr read */ - retval = read_count; - goto error; - } - offset += read_count; - attr_size -= read_count; + retval = -ERANGE; + } else { + iov_iter_truncate(&to, attr_size); + retval = p9_client_read(attr_fid, 0, &to, &err); + if (err) + retval = err; } - /* Total read xattr bytes */ - retval = offset; -error: - if (attr_fid) - p9_client_clunk(attr_fid); + p9_client_clunk(attr_fid); return retval; - } @@ -120,8 +105,11 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name, int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, const void *value, size_t value_len, int flags) { - u64 offset = 0; - int retval, msize, write_count; + struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; + struct iov_iter from; + int retval; + + iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len); p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n", name, value_len, flags); @@ -135,29 +123,11 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, * On success fid points to xattr */ retval = p9_client_xattrcreate(fid, name, value_len, flags); - if (retval < 0) { + if (retval < 0) p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n", retval); - goto err; - } - msize = fid->clnt->msize; - while (value_len) { - if (value_len > (msize - P9_IOHDRSZ)) - write_count = msize - P9_IOHDRSZ; - else - write_count = value_len; - write_count = p9_client_write(fid, ((char *)value)+offset, - NULL, offset, write_count); - if (write_count < 0) { - /* error in xattr write */ - retval = write_count; - goto err; - } - offset += write_count; - value_len -= write_count; - } - retval = 0; -err: + else + p9_client_write(fid, 0, &from, &retval); p9_client_clunk(fid); return retval; } @@ -32,6 +32,7 @@ source "fs/gfs2/Kconfig" source "fs/ocfs2/Kconfig" source "fs/btrfs/Kconfig" source "fs/nilfs2/Kconfig" +source "fs/f2fs/Kconfig" config FS_DAX bool "Direct Access (DAX) support" @@ -217,7 +218,6 @@ source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" source "fs/ufs/Kconfig" source "fs/exofs/Kconfig" -source "fs/f2fs/Kconfig" endif # MISC_FILESYSTEMS diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index 270c481..2d0cbbd 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -27,9 
+27,6 @@ config COMPAT_BINFMT_ELF bool depends on COMPAT && BINFMT_ELF -config ARCH_BINFMT_ELF_RANDOMIZE_PIE - bool - config ARCH_BINFMT_ELF_STATE bool diff --git a/fs/Makefile b/fs/Makefile index a88ac48..cb92fd4 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -118,6 +118,7 @@ obj-$(CONFIG_HOSTFS) += hostfs/ obj-$(CONFIG_HPPFS) += hppfs/ obj-$(CONFIG_CACHEFILES) += cachefiles/ obj-$(CONFIG_DEBUG_FS) += debugfs/ +obj-$(CONFIG_TRACING) += tracefs/ obj-$(CONFIG_OCFS2_FS) += ocfs2/ obj-$(CONFIG_BTRFS_FS) += btrfs/ obj-$(CONFIG_GFS2_FS) += gfs2/ diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c index f2ba88a..82d14cd 100644 --- a/fs/adfs/dir_fplus.c +++ b/fs/adfs/dir_fplus.c @@ -61,6 +61,7 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct kcalloc(size, sizeof(struct buffer_head *), GFP_KERNEL); if (!bh_fplus) { + ret = -ENOMEM; adfs_error(sb, "not enough memory for" " dir object %X (%d blocks)", id, size); goto out; diff --git a/fs/adfs/file.c b/fs/adfs/file.c index 07c9edc..46c0d56 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -23,11 +23,9 @@ const struct file_operations adfs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, .mmap = generic_file_mmap, .fsync = generic_file_fsync, - .write = new_sync_write, .write_iter = generic_file_write_iter, .splice_read = generic_file_splice_read, }; diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index b9acada..335055d 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -298,7 +298,7 @@ out: int adfs_notify_change(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; unsigned int ia_valid = attr->ia_valid; int error; diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 9852bdf..a19c31d 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -316,7 +316,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di dm = kmalloc(nzones * sizeof(*dm), GFP_KERNEL); if (dm == NULL) { adfs_error(sb, "not enough memory"); - return NULL; + return ERR_PTR(-ENOMEM); } for (zone = 0; zone < nzones; zone++, map_addr++) { @@ -349,7 +349,7 @@ error_free: brelse(dm[zone].dm_bh); kfree(dm); - return NULL; + return ERR_PTR(-EIO); } static inline unsigned long adfs_discsize(struct adfs_discrecord *dr, int block_bits) @@ -370,6 +370,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) unsigned char *b_data; struct adfs_sb_info *asb; struct inode *root; + int ret = -EINVAL; sb->s_flags |= MS_NODIRATIME; @@ -391,6 +392,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) sb_set_blocksize(sb, BLOCK_SIZE); if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) { adfs_error(sb, "unable to read superblock"); + ret = -EIO; goto error; } @@ -400,6 +402,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) if (!silent) printk("VFS: Can't find an adfs filesystem on dev " "%s.\n", sb->s_id); + ret = -EINVAL; goto error_free_bh; } @@ -412,6 +415,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) if (!silent) printk("VPS: Can't find an adfs filesystem on dev " "%s.\n", sb->s_id); + ret = -EINVAL; goto error_free_bh; } @@ -421,11 +425,13 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) if (!bh) { adfs_error(sb, "couldn't read superblock on " "2nd try."); + ret = -EIO; goto error; } b_data = bh->b_data + 
(ADFS_DISCRECORD % sb->s_blocksize); if (adfs_checkbblk(b_data)) { adfs_error(sb, "disc record mismatch, very weird!"); + ret = -EINVAL; goto error_free_bh; } dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET); @@ -433,6 +439,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) if (!silent) printk(KERN_ERR "VFS: Unsupported blocksize on dev " "%s.\n", sb->s_id); + ret = -EINVAL; goto error; } @@ -447,10 +454,12 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) asb->s_size = adfs_discsize(dr, sb->s_blocksize_bits); asb->s_version = dr->format_version; asb->s_log2sharesize = dr->log2sharesize; - + asb->s_map = adfs_read_map(sb, dr); - if (!asb->s_map) + if (IS_ERR(asb->s_map)) { + ret = PTR_ERR(asb->s_map); goto error_free_bh; + } brelse(bh); @@ -499,6 +508,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) brelse(asb->s_map[i].dm_bh); kfree(asb->s_map); adfs_error(sb, "get root inode failed\n"); + ret = -EIO; goto error; } return 0; @@ -508,7 +518,7 @@ error_free_bh: error: sb->s_fs_info = NULL; kfree(asb); - return -EINVAL; + return ret; } static struct dentry *adfs_mount(struct file_system_type *fs_type, diff --git a/fs/affs/affs.h b/fs/affs/affs.h index c8764bd..cffe837 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -106,18 +106,22 @@ struct affs_sb_info { spinlock_t work_lock; /* protects sb_work and work_queued */ }; -#define SF_INTL 0x0001 /* International filesystem. */ -#define SF_BM_VALID 0x0002 /* Bitmap is valid. */ -#define SF_IMMUTABLE 0x0004 /* Protection bits cannot be changed */ -#define SF_QUIET 0x0008 /* chmod errors will be not reported */ -#define SF_SETUID 0x0010 /* Ignore Amiga uid */ -#define SF_SETGID 0x0020 /* Ignore Amiga gid */ -#define SF_SETMODE 0x0040 /* Ignore Amiga protection bits */ -#define SF_MUFS 0x0100 /* Use MUFS uid/gid mapping */ -#define SF_OFS 0x0200 /* Old filesystem */ -#define SF_PREFIX 0x0400 /* Buffer for prefix is allocated */ -#define SF_VERBOSE 0x0800 /* Talk about fs when mounting */ -#define SF_NO_TRUNCATE 0x1000 /* Don't truncate filenames */ +#define AFFS_MOUNT_SF_INTL 0x0001 /* International filesystem. */ +#define AFFS_MOUNT_SF_BM_VALID 0x0002 /* Bitmap is valid. 
*/ +#define AFFS_MOUNT_SF_IMMUTABLE 0x0004 /* Protection bits cannot be changed */ +#define AFFS_MOUNT_SF_QUIET 0x0008 /* chmod errors will be not reported */ +#define AFFS_MOUNT_SF_SETUID 0x0010 /* Ignore Amiga uid */ +#define AFFS_MOUNT_SF_SETGID 0x0020 /* Ignore Amiga gid */ +#define AFFS_MOUNT_SF_SETMODE 0x0040 /* Ignore Amiga protection bits */ +#define AFFS_MOUNT_SF_MUFS 0x0100 /* Use MUFS uid/gid mapping */ +#define AFFS_MOUNT_SF_OFS 0x0200 /* Old filesystem */ +#define AFFS_MOUNT_SF_PREFIX 0x0400 /* Buffer for prefix is allocated */ +#define AFFS_MOUNT_SF_VERBOSE 0x0800 /* Talk about fs when mounting */ +#define AFFS_MOUNT_SF_NO_TRUNCATE 0x1000 /* Don't truncate filenames */ + +#define affs_clear_opt(o, opt) (o &= ~AFFS_MOUNT_##opt) +#define affs_set_opt(o, opt) (o |= AFFS_MOUNT_##opt) +#define affs_test_opt(o, opt) ((o) & AFFS_MOUNT_##opt) /* short cut to get to the affs specific sb data */ static inline struct affs_sb_info *AFFS_SB(struct super_block *sb) diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 388da1e..a8f463c 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -138,7 +138,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino) static int affs_remove_link(struct dentry *dentry) { - struct inode *dir, *inode = dentry->d_inode; + struct inode *dir, *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct buffer_head *bh = NULL, *link_bh = NULL; u32 link_ino, ino; @@ -268,11 +268,11 @@ affs_remove_header(struct dentry *dentry) struct buffer_head *bh = NULL; int retval; - dir = dentry->d_parent->d_inode; + dir = d_inode(dentry->d_parent); sb = dir->i_sb; retval = -ENOENT; - inode = dentry->d_inode; + inode = d_inode(dentry); if (!inode) goto done; @@ -471,9 +471,9 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...) bool affs_nofilenametruncate(const struct dentry *dentry) { - struct inode *inode = dentry->d_inode; - return AFFS_SB(inode->i_sb)->s_flags & SF_NO_TRUNCATE; + struct inode *inode = d_inode(dentry); + return affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_NO_TRUNCATE); } /* Check if the name is valid for a affs object. 
*/ diff --git a/fs/affs/file.c b/fs/affs/file.c index d2468bf..659c579 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -12,7 +12,7 @@ * affs regular file handling primitives */ -#include <linux/aio.h> +#include <linux/uio.h> #include "affs.h" static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); @@ -389,8 +389,7 @@ static void affs_write_failed(struct address_space *mapping, loff_t to) } static ssize_t -affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -398,15 +397,15 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, size_t count = iov_iter_count(iter); ssize_t ret; - if (rw == WRITE) { + if (iov_iter_rw(iter) == WRITE) { loff_t size = offset + count; if (AFFS_I(inode)->mmu_private < size) return 0; } - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block); - if (ret < 0 && (rw & WRITE)) + ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block); + if (ret < 0 && iov_iter_rw(iter) == WRITE) affs_write_failed(mapping, offset + count); return ret; } @@ -699,8 +698,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, boff = tmp % bsize; if (boff) { bh = affs_bread_ino(inode, bidx, 0); - if (IS_ERR(bh)) - return PTR_ERR(bh); + if (IS_ERR(bh)) { + written = PTR_ERR(bh); + goto err_first_bh; + } tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); @@ -712,14 +713,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, bidx++; } else if (bidx) { bh = affs_bread_ino(inode, bidx - 1, 0); - if (IS_ERR(bh)) - return PTR_ERR(bh); + if (IS_ERR(bh)) { + written = PTR_ERR(bh); + goto err_first_bh; + } } while (from + bsize <= to) { prev_bh = bh; bh = affs_getemptyblk_ino(inode, bidx); if (IS_ERR(bh)) - goto out; + goto err_bh; memcpy(AFFS_DATA(bh), data + from, bsize); if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); @@ -751,7 +754,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, prev_bh = bh; bh = affs_bread_ino(inode, bidx, 1); if (IS_ERR(bh)) - goto out; + goto err_bh; tmp = min(bsize, to - from); BUG_ON(tmp > bsize); memcpy(AFFS_DATA(bh), data + from, tmp); @@ -790,12 +793,13 @@ done: if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; +err_first_bh: unlock_page(page); page_cache_release(page); return written; -out: +err_bh: bh = prev_bh; if (!written) written = PTR_ERR(bh); @@ -910,7 +914,7 @@ affs_truncate(struct inode *inode) if (inode->i_size) { AFFS_I(inode)->i_blkcnt = last_blk + 1; AFFS_I(inode)->i_extcnt = ext + 1; - if (AFFS_SB(sb)->s_flags & SF_OFS) { + if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) { struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0); u32 tmp; if (IS_ERR(bh)) { @@ -964,9 +968,7 @@ int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) } const struct file_operations affs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .open = affs_file_open, diff --git a/fs/affs/inode.c b/fs/affs/inode.c index 6f34510..a022f4a 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -66,23 +66,23 @@ struct inode 
*affs_iget(struct super_block *sb, unsigned long ino) AFFS_I(inode)->i_lastalloc = 0; AFFS_I(inode)->i_pa_cnt = 0; - if (sbi->s_flags & SF_SETMODE) + if (affs_test_opt(sbi->s_flags, SF_SETMODE)) inode->i_mode = sbi->s_mode; else inode->i_mode = prot_to_mode(prot); id = be16_to_cpu(tail->uid); - if (id == 0 || sbi->s_flags & SF_SETUID) + if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETUID)) inode->i_uid = sbi->s_uid; - else if (id == 0xFFFF && sbi->s_flags & SF_MUFS) + else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) i_uid_write(inode, 0); else i_uid_write(inode, id); id = be16_to_cpu(tail->gid); - if (id == 0 || sbi->s_flags & SF_SETGID) + if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETGID)) inode->i_gid = sbi->s_gid; - else if (id == 0xFFFF && sbi->s_flags & SF_MUFS) + else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) i_gid_write(inode, 0); else i_gid_write(inode, id); @@ -94,7 +94,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) /* fall through */ case ST_USERDIR: if (be32_to_cpu(tail->stype) == ST_USERDIR || - sbi->s_flags & SF_SETMODE) { + affs_test_opt(sbi->s_flags, SF_SETMODE)) { if (inode->i_mode & S_IRUSR) inode->i_mode |= S_IXUSR; if (inode->i_mode & S_IRGRP) @@ -133,7 +133,8 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) } if (tail->link_chain) set_nlink(inode, 2); - inode->i_mapping->a_ops = (sbi->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops; + inode->i_mapping->a_ops = affs_test_opt(sbi->s_flags, SF_OFS) ? + &affs_aops_ofs : &affs_aops; inode->i_op = &affs_file_inode_operations; inode->i_fop = &affs_file_operations; break; @@ -190,15 +191,15 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc) if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) { uid = i_uid_read(inode); gid = i_gid_read(inode); - if (AFFS_SB(sb)->s_flags & SF_MUFS) { + if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_MUFS)) { if (uid == 0 || uid == 0xFFFF) uid = uid ^ ~0; if (gid == 0 || gid == 0xFFFF) gid = gid ^ ~0; } - if (!(AFFS_SB(sb)->s_flags & SF_SETUID)) + if (!affs_test_opt(AFFS_SB(sb)->s_flags, SF_SETUID)) tail->uid = cpu_to_be16(uid); - if (!(AFFS_SB(sb)->s_flags & SF_SETGID)) + if (!affs_test_opt(AFFS_SB(sb)->s_flags, SF_SETGID)) tail->gid = cpu_to_be16(gid); } } @@ -212,7 +213,7 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc) int affs_notify_change(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid); @@ -221,11 +222,14 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr) if (error) goto out; - if (((attr->ia_valid & ATTR_UID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETUID)) || - ((attr->ia_valid & ATTR_GID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETGID)) || + if (((attr->ia_valid & ATTR_UID) && + affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_SETUID)) || + ((attr->ia_valid & ATTR_GID) && + affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_SETGID)) || ((attr->ia_valid & ATTR_MODE) && - (AFFS_SB(inode->i_sb)->s_flags & (SF_SETMODE | SF_IMMUTABLE)))) { - if (!(AFFS_SB(inode->i_sb)->s_flags & SF_QUIET)) + (AFFS_SB(inode->i_sb)->s_flags & + (AFFS_MOUNT_SF_SETMODE | AFFS_MOUNT_SF_IMMUTABLE)))) { + if (!affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_QUIET)) error = -EPERM; goto out; } diff --git a/fs/affs/namei.c b/fs/affs/namei.c index ffb7bd8..181e05b 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -53,7 +53,8 @@ 
affs_intl_toupper(int ch) static inline toupper_t affs_get_toupper(struct super_block *sb) { - return AFFS_SB(sb)->s_flags & SF_INTL ? affs_intl_toupper : affs_toupper; + return affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL) ? + affs_intl_toupper : affs_toupper; } /* @@ -250,7 +251,7 @@ int affs_unlink(struct inode *dir, struct dentry *dentry) { pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, - dentry->d_inode->i_ino, dentry); + d_inode(dentry)->i_ino, dentry); return affs_remove_header(dentry); } @@ -275,7 +276,8 @@ affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) inode->i_op = &affs_file_inode_operations; inode->i_fop = &affs_file_operations; - inode->i_mapping->a_ops = (AFFS_SB(sb)->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops; + inode->i_mapping->a_ops = affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS) ? + &affs_aops_ofs : &affs_aops; error = affs_add_entry(dir, inode, dentry, ST_FILE); if (error) { clear_nlink(inode); @@ -318,7 +320,7 @@ int affs_rmdir(struct inode *dir, struct dentry *dentry) { pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, - dentry->d_inode->i_ino, dentry); + d_inode(dentry)->i_ino, dentry); return affs_remove_header(dentry); } @@ -401,7 +403,7 @@ err: int affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); pr_debug("%s(%lu, %lu, \"%pd\")\n", __func__, inode->i_ino, dir->i_ino, dentry); @@ -428,13 +430,13 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry, return retval; /* Unlink destination if it already exists */ - if (new_dentry->d_inode) { + if (d_really_is_positive(new_dentry)) { retval = affs_remove_header(new_dentry); if (retval) return retval; } - bh = affs_bread(sb, old_dentry->d_inode->i_ino); + bh = affs_bread(sb, d_inode(old_dentry)->i_ino); if (!bh) return -EIO; diff --git a/fs/affs/super.c b/fs/affs/super.c index 4cf0e91..3f89c9e 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -227,22 +227,22 @@ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, if (match_octal(&args[0], &option)) return 0; *mode = option & 0777; - *mount_opts |= SF_SETMODE; + affs_set_opt(*mount_opts, SF_SETMODE); break; case Opt_mufs: - *mount_opts |= SF_MUFS; + affs_set_opt(*mount_opts, SF_MUFS); break; case Opt_notruncate: - *mount_opts |= SF_NO_TRUNCATE; + affs_set_opt(*mount_opts, SF_NO_TRUNCATE); break; case Opt_prefix: *prefix = match_strdup(&args[0]); if (!*prefix) return 0; - *mount_opts |= SF_PREFIX; + affs_set_opt(*mount_opts, SF_PREFIX); break; case Opt_protect: - *mount_opts |= SF_IMMUTABLE; + affs_set_opt(*mount_opts, SF_IMMUTABLE); break; case Opt_reserved: if (match_int(&args[0], reserved)) @@ -258,7 +258,7 @@ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, *gid = make_kgid(current_user_ns(), option); if (!gid_valid(*gid)) return 0; - *mount_opts |= SF_SETGID; + affs_set_opt(*mount_opts, SF_SETGID); break; case Opt_setuid: if (match_int(&args[0], &option)) @@ -266,10 +266,10 @@ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, *uid = make_kuid(current_user_ns(), option); if (!uid_valid(*uid)) return 0; - *mount_opts |= SF_SETUID; + affs_set_opt(*mount_opts, SF_SETUID); break; case Opt_verbose: - *mount_opts |= SF_VERBOSE; + affs_set_opt(*mount_opts, SF_VERBOSE); break; case Opt_volume: { char *vol = match_strdup(&args[0]); @@ -435,30 +435,31 @@ got_root: case MUFS_FS: case MUFS_INTLFFS: 
case MUFS_DCFFS: - sbi->s_flags |= SF_MUFS; + affs_set_opt(sbi->s_flags, SF_MUFS); /* fall thru */ case FS_INTLFFS: case FS_DCFFS: - sbi->s_flags |= SF_INTL; + affs_set_opt(sbi->s_flags, SF_INTL); break; case MUFS_FFS: - sbi->s_flags |= SF_MUFS; + affs_set_opt(sbi->s_flags, SF_MUFS); break; case FS_FFS: break; case MUFS_OFS: - sbi->s_flags |= SF_MUFS; + affs_set_opt(sbi->s_flags, SF_MUFS); /* fall thru */ case FS_OFS: - sbi->s_flags |= SF_OFS; + affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= MS_NOEXEC; break; case MUFS_DCOFS: case MUFS_INTLOFS: - sbi->s_flags |= SF_MUFS; + affs_set_opt(sbi->s_flags, SF_MUFS); case FS_DCOFS: case FS_INTLOFS: - sbi->s_flags |= SF_INTL | SF_OFS; + affs_set_opt(sbi->s_flags, SF_INTL); + affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= MS_NOEXEC; break; default: @@ -467,7 +468,7 @@ got_root: return -EINVAL; } - if (mount_flags & SF_VERBOSE) { + if (affs_test_opt(mount_flags, SF_VERBOSE)) { u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0]; pr_notice("Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n", len > 31 ? 31 : len, @@ -478,7 +479,7 @@ got_root: sb->s_flags |= MS_NODEV | MS_NOSUID; sbi->s_data_blksize = sb->s_blocksize; - if (sbi->s_flags & SF_OFS) + if (affs_test_opt(sbi->s_flags, SF_OFS)) sbi->s_data_blksize -= 24; tmp_flags = sb->s_flags; @@ -493,7 +494,7 @@ got_root: if (IS_ERR(root_inode)) return PTR_ERR(root_inode); - if (AFFS_SB(sb)->s_flags & SF_INTL) + if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL)) sb->s_d_op = &affs_intl_dentry_operations; else sb->s_d_op = &affs_dentry_operations; @@ -520,10 +521,14 @@ affs_remount(struct super_block *sb, int *flags, char *data) int root_block; unsigned long mount_flags; int res = 0; - char *new_opts = kstrdup(data, GFP_KERNEL); + char *new_opts; char volume[32]; char *prefix = NULL; + new_opts = kstrdup(data, GFP_KERNEL); + if (!new_opts) + return -ENOMEM; + pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); sync_filesystem(sb); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 4ec35e9..e10e1778 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -505,7 +505,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, _enter("{%x:%u},%p{%pd},", vnode->fid.vid, vnode->fid.vnode, dentry, dentry); - ASSERTCMP(dentry->d_inode, ==, NULL); + ASSERTCMP(d_inode(dentry), ==, NULL); if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); @@ -563,8 +563,8 @@ success: _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }", fid.vnode, fid.unique, - dentry->d_inode->i_ino, - dentry->d_inode->i_generation); + d_inode(dentry)->i_ino, + d_inode(dentry)->i_generation); return NULL; } @@ -586,9 +586,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - vnode = AFS_FS_I(dentry->d_inode); + vnode = AFS_FS_I(d_inode(dentry)); - if (dentry->d_inode) + if (d_really_is_positive(dentry)) _enter("{v={%x:%u} n=%pd fl=%lx},", vnode->fid.vid, vnode->fid.vnode, dentry, vnode->flags); @@ -601,7 +601,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) /* lock down the parent dentry so we can peer at it */ parent = dget_parent(dentry); - dir = AFS_FS_I(parent->d_inode); + dir = AFS_FS_I(d_inode(parent)); /* validate the parent directory */ if (test_bit(AFS_VNODE_MODIFIED, &dir->flags)) @@ -623,9 +623,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) switch (ret) { case 0: /* the filename maps to something */ - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) goto out_bad; - 
if (is_bad_inode(dentry->d_inode)) { + if (is_bad_inode(d_inode(dentry))) { printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n", dentry); goto out_bad; @@ -647,7 +647,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) _debug("%pd: file deleted (uq %u -> %u I:%u)", dentry, fid.unique, vnode->fid.unique, - dentry->d_inode->i_generation); + d_inode(dentry)->i_generation); spin_lock(&vnode->lock); set_bit(AFS_VNODE_DELETED, &vnode->flags); spin_unlock(&vnode->lock); @@ -658,7 +658,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) case -ENOENT: /* the filename is unknown */ _debug("%pd: dirent not found", dentry); - if (dentry->d_inode) + if (d_really_is_positive(dentry)) goto not_found; goto out_valid; @@ -703,9 +703,9 @@ static int afs_d_delete(const struct dentry *dentry) if (dentry->d_flags & DCACHE_NFSFS_RENAMED) goto zap; - if (dentry->d_inode && - (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags) || - test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(dentry->d_inode)->flags))) + if (d_really_is_positive(dentry) && + (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(d_inode(dentry))->flags) || + test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(d_inode(dentry))->flags))) goto zap; _leave(" = 0 [keep]"); @@ -814,8 +814,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) if (ret < 0) goto rmdir_error; - if (dentry->d_inode) { - vnode = AFS_FS_I(dentry->d_inode); + if (d_really_is_positive(dentry)) { + vnode = AFS_FS_I(d_inode(dentry)); clear_nlink(&vnode->vfs_inode); set_bit(AFS_VNODE_DELETED, &vnode->flags); afs_discard_callback_on_delete(vnode); @@ -856,8 +856,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) goto error; } - if (dentry->d_inode) { - vnode = AFS_FS_I(dentry->d_inode); + if (d_really_is_positive(dentry)) { + vnode = AFS_FS_I(d_inode(dentry)); /* make sure we have a callback promise on the victim */ ret = afs_validate(vnode, key); @@ -869,7 +869,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) if (ret < 0) goto remove_error; - if (dentry->d_inode) { + if (d_really_is_positive(dentry)) { /* if the file wasn't deleted due to excess hard links, the * fileserver will break the callback promise on the file - if * it had one - before it returns to us, and if it was deleted, @@ -879,7 +879,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) * or it was outstanding on a different server, then it won't * break it either... 
*/ - vnode = AFS_FS_I(dentry->d_inode); + vnode = AFS_FS_I(d_inode(dentry)); if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) _debug("AFS_VNODE_DELETED"); if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) @@ -977,7 +977,7 @@ static int afs_link(struct dentry *from, struct inode *dir, struct key *key; int ret; - vnode = AFS_FS_I(from->d_inode); + vnode = AFS_FS_I(d_inode(from)); dvnode = AFS_FS_I(dir); _enter("{%x:%u},{%x:%u},{%pd}", @@ -1089,7 +1089,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, struct key *key; int ret; - vnode = AFS_FS_I(old_dentry->d_inode); + vnode = AFS_FS_I(d_inode(old_dentry)); orig_dvnode = AFS_FS_I(old_dir); new_dvnode = AFS_FS_I(new_dir); diff --git a/fs/afs/file.c b/fs/afs/file.c index 932ce07..999bc3c 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -31,8 +31,6 @@ const struct file_operations afs_file_operations = { .open = afs_open, .release = afs_release, .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = afs_file_write, .mmap = generic_file_readonly_mmap, diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 8a1d38e..e06f5a2 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -379,7 +379,7 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry, { struct inode *inode; - inode = dentry->d_inode; + inode = d_inode(dentry); _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); @@ -458,7 +458,7 @@ void afs_evict_inode(struct inode *inode) */ int afs_setattr(struct dentry *dentry, struct iattr *attr) { - struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); + struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); struct key *key; int ret; diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 0dd4daf..91ea1aa 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -22,9 +22,12 @@ int afs_abort_to_error(u32 abort_code) { switch (abort_code) { + /* low errno codes inserted into abort namespace */ case 13: return -EACCES; case 27: return -EFBIG; case 30: return -EROFS; + + /* VICE "special error" codes; 101 - 111 */ case VSALVAGE: return -EIO; case VNOVNODE: return -ENOENT; case VNOVOL: return -ENOMEDIUM; @@ -36,11 +39,18 @@ int afs_abort_to_error(u32 abort_code) case VOVERQUOTA: return -EDQUOT; case VBUSY: return -EBUSY; case VMOVED: return -ENXIO; - case 0x2f6df0a: return -EWOULDBLOCK; + + /* Unified AFS error table; ET "uae" == 0x2f6df00 */ + case 0x2f6df00: return -EPERM; + case 0x2f6df01: return -ENOENT; + case 0x2f6df04: return -EIO; + case 0x2f6df0a: return -EAGAIN; + case 0x2f6df0b: return -ENOMEM; case 0x2f6df0c: return -EACCES; case 0x2f6df0f: return -EBUSY; case 0x2f6df10: return -EEXIST; case 0x2f6df11: return -EXDEV; + case 0x2f6df12: return -ENODEV; case 0x2f6df13: return -ENOTDIR; case 0x2f6df14: return -EISDIR; case 0x2f6df15: return -EINVAL; @@ -54,8 +64,12 @@ int afs_abort_to_error(u32 abort_code) case 0x2f6df23: return -ENAMETOOLONG; case 0x2f6df24: return -ENOLCK; case 0x2f6df26: return -ENOTEMPTY; + case 0x2f6df28: return -EWOULDBLOCK; + case 0x2f6df69: return -ENOTCONN; + case 0x2f6df6c: return -ETIMEDOUT; case 0x2f6df78: return -EDQUOT; + /* RXKAD abort codes; from include/rxrpc/packet.h. 
ET "RXK" == 0x1260B00 */ case RXKADINCONSISTENCY: return -EPROTO; case RXKADPACKETSHORT: return -EPROTO; case RXKADLEVELFAIL: return -EKEYREJECTED; diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 938c5ab..ccd0b21 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -134,7 +134,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) _enter("{%pd}", mntpt); - BUG_ON(!mntpt->d_inode); + BUG_ON(!d_inode(mntpt)); ret = -ENOMEM; devname = (char *) get_zeroed_page(GFP_KERNEL); @@ -145,7 +145,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) if (!options) goto error_no_options; - vnode = AFS_FS_I(mntpt->d_inode); + vnode = AFS_FS_I(d_inode(mntpt)); if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) { /* if the directory is a pseudo directory, use the d_name */ static const char afs_root_cell[] = ":root.cell."; @@ -169,14 +169,14 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) } } else { /* read the contents of the AFS special symlink */ - loff_t size = i_size_read(mntpt->d_inode); + loff_t size = i_size_read(d_inode(mntpt)); char *buf; ret = -EINVAL; if (size > PAGE_SIZE - 1) goto error_no_page; - page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL); + page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL); if (IS_ERR(page)) { ret = PTR_ERR(page); goto error_no_page; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index dbc732e..3a57a1b 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -770,15 +770,12 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, void afs_send_empty_reply(struct afs_call *call) { struct msghdr msg; - struct kvec iov[1]; _enter(""); - iov[0].iov_base = NULL; - iov[0].iov_len = 0; msg.msg_name = NULL; msg.msg_namelen = 0; - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0); /* WTF? */ + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; diff --git a/fs/afs/super.c b/fs/afs/super.c index c486155..1fb4a51 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -529,7 +529,7 @@ static void afs_destroy_inode(struct inode *inode) static int afs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct afs_volume_status vs; - struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); + struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); struct key *key; int ret; diff --git a/fs/afs/write.c b/fs/afs/write.c index c13cb08..0714abc 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -14,7 +14,6 @@ #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/pagevec.h> -#include <linux/aio.h> #include "internal.h" static int afs_write_back_from_locked_page(struct afs_writeback *wb, @@ -77,6 +77,11 @@ struct kioctx_cpu { unsigned reqs_available; }; +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + struct kioctx { struct percpu_ref users; atomic_t dead; @@ -115,7 +120,7 @@ struct kioctx { /* * signals when all in-flight requests are done */ - struct completion *requests_done; + struct ctx_rq_wait *rq_wait; struct { /* @@ -151,6 +156,38 @@ struct kioctx { unsigned id; }; +/* + * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either + * cancelled or completed (this makes a certain amount of sense because + * successful cancellation - io_cancel() - does deliver the completion to + * userspace). 
+ * + * And since most things don't implement kiocb cancellation and we'd really like + * kiocb completion to be lockless when possible, we use ki_cancel to + * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED + * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel(). + */ +#define KIOCB_CANCELLED ((void *) (~0ULL)) + +struct aio_kiocb { + struct kiocb common; + + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + + struct iocb __user *ki_user_iocb; /* user's aiocb */ + __u64 ki_user_data; /* user's data for completion */ + + struct list_head ki_list; /* the aio core uses this + * for cancellation */ + + /* + * If the aio_resfd field of the userspace iocb is not zero, + * this is the underlying eventfd context to deliver events to. + */ + struct eventfd_ctx *ki_eventfd; +}; + /*------ sysctl variables----*/ static DEFINE_SPINLOCK(aio_nr_lock); unsigned long aio_nr; /* current system wide number of aio requests */ @@ -220,7 +257,7 @@ static int __init aio_setup(void) if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); - kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); + kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page)); @@ -278,11 +315,11 @@ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) return 0; } -static void aio_ring_remap(struct file *file, struct vm_area_struct *vma) +static int aio_ring_remap(struct file *file, struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; struct kioctx_table *table; - int i; + int i, res = -EINVAL; spin_lock(&mm->ioctx_lock); rcu_read_lock(); @@ -292,13 +329,17 @@ static void aio_ring_remap(struct file *file, struct vm_area_struct *vma) ctx = table->table[i]; if (ctx && ctx->aio_ring_file == file) { - ctx->user_id = ctx->mmap_base = vma->vm_start; + if (!atomic_read(&ctx->dead)) { + ctx->user_id = ctx->mmap_base = vma->vm_start; + res = 0; + } break; } } rcu_read_unlock(); spin_unlock(&mm->ioctx_lock); + return res; } static const struct file_operations aio_ring_fops = { @@ -480,8 +521,9 @@ static int aio_setup_ring(struct kioctx *ctx) #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) -void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) +void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel) { + struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common); struct kioctx *ctx = req->ki_ctx; unsigned long flags; @@ -496,7 +538,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) } EXPORT_SYMBOL(kiocb_set_cancel_fn); -static int kiocb_cancel(struct kiocb *kiocb) +static int kiocb_cancel(struct aio_kiocb *kiocb) { kiocb_cancel_fn *old, *cancel; @@ -514,7 +556,7 @@ static int kiocb_cancel(struct kiocb *kiocb) cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); } while (cancel != old); - return cancel(kiocb); + return cancel(&kiocb->common); } static void free_ioctx(struct work_struct *work) @@ -535,8 +577,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref) struct kioctx *ctx = container_of(ref, struct kioctx, reqs); /* At this point we know that there are no any in-flight requests */ - if (ctx->requests_done) - complete(ctx->requests_done); + if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) + 
complete(&ctx->rq_wait->comp); INIT_WORK(&ctx->free_work, free_ioctx); schedule_work(&ctx->free_work); @@ -550,13 +592,13 @@ static void free_ioctx_reqs(struct percpu_ref *ref) static void free_ioctx_users(struct percpu_ref *ref) { struct kioctx *ctx = container_of(ref, struct kioctx, users); - struct kiocb *req; + struct aio_kiocb *req; spin_lock_irq(&ctx->ctx_lock); while (!list_empty(&ctx->active_reqs)) { req = list_first_entry(&ctx->active_reqs, - struct kiocb, ki_list); + struct aio_kiocb, ki_list); list_del_init(&req->ki_list); kiocb_cancel(req); @@ -655,8 +697,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) nr_events *= 2; /* Prevent overflows */ - if ((nr_events > (0x10000000U / sizeof(struct io_event))) || - (nr_events > (0x10000000U / sizeof(struct kiocb)))) { + if (nr_events > (0x10000000U / sizeof(struct io_event))) { pr_debug("ENOMEM: nr_events too high\n"); return ERR_PTR(-EINVAL); } @@ -727,6 +768,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) err_cleanup: aio_nr_sub(ctx->max_reqs); err_ctx: + atomic_set(&ctx->dead, 1); + if (ctx->mmap_size) + vm_munmap(ctx->mmap_base, ctx->mmap_size); aio_free_ring(ctx); err: mutex_unlock(&ctx->ring_lock); @@ -744,15 +788,16 @@ err: * the rapid destruction of the kioctx. */ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, - struct completion *requests_done) + struct ctx_rq_wait *wait) { struct kioctx_table *table; - if (atomic_xchg(&ctx->dead, 1)) + spin_lock(&mm->ioctx_lock); + if (atomic_xchg(&ctx->dead, 1)) { + spin_unlock(&mm->ioctx_lock); return -EINVAL; + } - - spin_lock(&mm->ioctx_lock); table = rcu_dereference_raw(mm->ioctx_table); WARN_ON(ctx != table->table[ctx->id]); table->table[ctx->id] = NULL; @@ -773,27 +818,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, if (ctx->mmap_size) vm_munmap(ctx->mmap_base, ctx->mmap_size); - ctx->requests_done = requests_done; + ctx->rq_wait = wait; percpu_ref_kill(&ctx->users); return 0; } -/* wait_on_sync_kiocb: - * Waits on the given sync kiocb to complete. - */ -ssize_t wait_on_sync_kiocb(struct kiocb *req) -{ - while (!req->ki_ctx) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (req->ki_ctx) - break; - io_schedule(); - } - __set_current_state(TASK_RUNNING); - return req->ki_user_data; -} -EXPORT_SYMBOL(wait_on_sync_kiocb); - /* * exit_aio: called when the last user of mm goes away. At this point, there is * no way for any new requests to be submited or any of the io_* syscalls to be @@ -805,18 +834,24 @@ EXPORT_SYMBOL(wait_on_sync_kiocb); void exit_aio(struct mm_struct *mm) { struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); - int i; + struct ctx_rq_wait wait; + int i, skipped; if (!table) return; + atomic_set(&wait.count, table->nr); + init_completion(&wait.comp); + + skipped = 0; for (i = 0; i < table->nr; ++i) { struct kioctx *ctx = table->table[i]; - struct completion requests_done = - COMPLETION_INITIALIZER_ONSTACK(requests_done); - if (!ctx) + if (!ctx) { + skipped++; continue; + } + /* * We don't need to bother with munmap() here - exit_mmap(mm) * is coming and it'll unmap everything. And we simply can't, @@ -825,10 +860,12 @@ void exit_aio(struct mm_struct *mm) * that it needs to unmap the area, just set it to 0. */ ctx->mmap_size = 0; - kill_ioctx(mm, ctx, &requests_done); + kill_ioctx(mm, ctx, &wait); + } + if (!atomic_sub_and_test(skipped, &wait.count)) { /* Wait until all IO for the context are done. 
*/ - wait_for_completion(&requests_done); + wait_for_completion(&wait.comp); } RCU_INIT_POINTER(mm->ioctx_table, NULL); @@ -948,9 +985,9 @@ static void user_refill_reqs_available(struct kioctx *ctx) * Allocate a slot for an aio request. * Returns NULL if no requests are free. */ -static inline struct kiocb *aio_get_req(struct kioctx *ctx) +static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) { - struct kiocb *req; + struct aio_kiocb *req; if (!get_reqs_available(ctx)) { user_refill_reqs_available(ctx); @@ -971,10 +1008,10 @@ out_put: return NULL; } -static void kiocb_free(struct kiocb *req) +static void kiocb_free(struct aio_kiocb *req) { - if (req->ki_filp) - fput(req->ki_filp); + if (req->common.ki_filp) + fput(req->common.ki_filp); if (req->ki_eventfd != NULL) eventfd_ctx_put(req->ki_eventfd); kmem_cache_free(kiocb_cachep, req); @@ -1010,8 +1047,9 @@ out: /* aio_complete * Called when the io request on the given iocb is complete. */ -void aio_complete(struct kiocb *iocb, long res, long res2) +static void aio_complete(struct kiocb *kiocb, long res, long res2) { + struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common); struct kioctx *ctx = iocb->ki_ctx; struct aio_ring *ring; struct io_event *ev_page, *event; @@ -1025,13 +1063,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2) * ref, no other paths have a way to get another ref * - the sync task helpfully left a reference to itself in the iocb */ - if (is_sync_kiocb(iocb)) { - iocb->ki_user_data = res; - smp_wmb(); - iocb->ki_ctx = ERR_PTR(-EXDEV); - wake_up_process(iocb->ki_obj.tsk); - return; - } + BUG_ON(is_sync_kiocb(kiocb)); if (iocb->ki_list.next) { unsigned long flags; @@ -1057,7 +1089,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2) ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); event = ev_page + pos % AIO_EVENTS_PER_PAGE; - event->obj = (u64)(unsigned long)iocb->ki_obj.user; + event->obj = (u64)(unsigned long)iocb->ki_user_iocb; event->data = iocb->ki_user_data; event->res = res; event->res2 = res2; @@ -1066,7 +1098,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2) flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", - ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, + ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, res, res2); /* after flagging the request as done, we @@ -1113,7 +1145,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2) percpu_ref_put(&ctx->reqs); } -EXPORT_SYMBOL(aio_complete); /* aio_read_events_ring * Pull an event off of the ioctx's event ring. Returns the number of @@ -1313,15 +1344,17 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) { struct kioctx *ioctx = lookup_ioctx(ctx); if (likely(NULL != ioctx)) { - struct completion requests_done = - COMPLETION_INITIALIZER_ONSTACK(requests_done); + struct ctx_rq_wait wait; int ret; + init_completion(&wait.comp); + atomic_set(&wait.count, 1); + /* Pass requests_done to kill_ioctx() where it can be set * in a thread-safe way. If we try to set it here then we have * a race condition if two io_destroy() called simultaneously. */ - ret = kill_ioctx(current->mm, ioctx, &requests_done); + ret = kill_ioctx(current->mm, ioctx, &wait); percpu_ref_put(&ioctx->users); /* Wait until all IO for the context are done. Otherwise kernel @@ -1329,7 +1362,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) * is destroyed. 
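The requests_done completion is replaced above by a small counted wait, so exit_aio() can issue kill_ioctx() for every context first and then sleep exactly once. A minimal sketch of the pattern, using hypothetical helper names (in the hunks the two halves live inline in exit_aio()/io_destroy() and in free_ioctx_reqs()):

struct ctx_rq_wait {
        struct completion comp;
        atomic_t count;                         /* contexts still draining */
};

/* Waiter side: arm the counter before tearing down the contexts. */
static void ctx_rq_wait_init(struct ctx_rq_wait *wait, int nr_ctx)
{
        init_completion(&wait->comp);
        atomic_set(&wait->count, nr_ctx);
}

/* Per-context side: the last context whose requests drain wakes the waiter. */
static void ctx_rq_wait_done(struct ctx_rq_wait *wait)
{
        if (atomic_dec_and_test(&wait->count))
                complete(&wait->comp);
}

exit_aio() additionally subtracts the number of NULL table slots it skipped, so the counter still reaches zero when some context ids were never allocated.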
*/ if (!ret) - wait_for_completion(&requests_done); + wait_for_completion(&wait.comp); return ret; } @@ -1337,50 +1370,21 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) return -EINVAL; } -typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, - unsigned long, loff_t); typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *); -static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb, - int rw, char __user *buf, - unsigned long *nr_segs, - struct iovec **iovec, - bool compat) +static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len, + struct iovec **iovec, + bool compat, + struct iov_iter *iter) { - ssize_t ret; - - *nr_segs = kiocb->ki_nbytes; - #ifdef CONFIG_COMPAT if (compat) - ret = compat_rw_copy_check_uvector(rw, + return compat_import_iovec(rw, (struct compat_iovec __user *)buf, - *nr_segs, UIO_FASTIOV, *iovec, iovec); - else + len, UIO_FASTIOV, iovec, iter); #endif - ret = rw_copy_check_uvector(rw, - (struct iovec __user *)buf, - *nr_segs, UIO_FASTIOV, *iovec, iovec); - if (ret < 0) - return ret; - - /* ki_nbytes now reflect bytes instead of segs */ - kiocb->ki_nbytes = ret; - return 0; -} - -static ssize_t aio_setup_single_vector(struct kiocb *kiocb, - int rw, char __user *buf, - unsigned long *nr_segs, - struct iovec *iovec) -{ - if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes))) - return -EFAULT; - - iovec->iov_base = buf; - iovec->iov_len = kiocb->ki_nbytes; - *nr_segs = 1; - return 0; + return import_iovec(rw, (struct iovec __user *)buf, + len, UIO_FASTIOV, iovec, iter); } /* @@ -1388,14 +1392,12 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb, * Performs the initial checks and io submission. */ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, - char __user *buf, bool compat) + char __user *buf, size_t len, bool compat) { struct file *file = req->ki_filp; ssize_t ret; - unsigned long nr_segs; int rw; fmode_t mode; - aio_rw_op *rw_op; rw_iter_op *iter_op; struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct iov_iter iter; @@ -1405,7 +1407,6 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, case IOCB_CMD_PREADV: mode = FMODE_READ; rw = READ; - rw_op = file->f_op->aio_read; iter_op = file->f_op->read_iter; goto rw_common; @@ -1413,51 +1414,40 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, case IOCB_CMD_PWRITEV: mode = FMODE_WRITE; rw = WRITE; - rw_op = file->f_op->aio_write; iter_op = file->f_op->write_iter; goto rw_common; rw_common: if (unlikely(!(file->f_mode & mode))) return -EBADF; - if (!rw_op && !iter_op) + if (!iter_op) return -EINVAL; - ret = (opcode == IOCB_CMD_PREADV || - opcode == IOCB_CMD_PWRITEV) - ? aio_setup_vectored_rw(req, rw, buf, &nr_segs, - &iovec, compat) - : aio_setup_single_vector(req, rw, buf, &nr_segs, - iovec); + if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV) + ret = aio_setup_vectored_rw(rw, buf, len, + &iovec, compat, &iter); + else { + ret = import_single_range(rw, buf, len, iovec, &iter); + iovec = NULL; + } if (!ret) - ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); + ret = rw_verify_area(rw, file, &req->ki_pos, + iov_iter_count(&iter)); if (ret < 0) { - if (iovec != inline_vecs) - kfree(iovec); + kfree(iovec); return ret; } - req->ki_nbytes = ret; - - /* XXX: move/kill - rw_verify_area()? 
*/ - /* This matches the pread()/pwrite() logic */ - if (req->ki_pos < 0) { - ret = -EINVAL; - break; - } + len = ret; if (rw == WRITE) file_start_write(file); - if (iter_op) { - iov_iter_init(&iter, rw, iovec, nr_segs, req->ki_nbytes); - ret = iter_op(req, &iter); - } else { - ret = rw_op(req, iovec, nr_segs, req->ki_pos); - } + ret = iter_op(req, &iter); if (rw == WRITE) file_end_write(file); + kfree(iovec); break; case IOCB_CMD_FDSYNC: @@ -1479,9 +1469,6 @@ rw_common: return -EINVAL; } - if (iovec != inline_vecs) - kfree(iovec); - if (ret != -EIOCBQUEUED) { /* * There's no easy way to restart the syscall since other AIO's @@ -1500,7 +1487,7 @@ rw_common: static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, struct iocb *iocb, bool compat) { - struct kiocb *req; + struct aio_kiocb *req; ssize_t ret; /* enforce forwards compatibility on users */ @@ -1523,11 +1510,14 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, if (unlikely(!req)) return -EAGAIN; - req->ki_filp = fget(iocb->aio_fildes); - if (unlikely(!req->ki_filp)) { + req->common.ki_filp = fget(iocb->aio_fildes); + if (unlikely(!req->common.ki_filp)) { ret = -EBADF; goto out_put_req; } + req->common.ki_pos = iocb->aio_offset; + req->common.ki_complete = aio_complete; + req->common.ki_flags = iocb_flags(req->common.ki_filp); if (iocb->aio_flags & IOCB_FLAG_RESFD) { /* @@ -1542,6 +1532,8 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, req->ki_eventfd = NULL; goto out_put_req; } + + req->common.ki_flags |= IOCB_EVENTFD; } ret = put_user(KIOCB_KEY, &user_iocb->aio_key); @@ -1550,13 +1542,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, goto out_put_req; } - req->ki_obj.user = user_iocb; + req->ki_user_iocb = user_iocb; req->ki_user_data = iocb->aio_data; - req->ki_pos = iocb->aio_offset; - req->ki_nbytes = iocb->aio_nbytes; - ret = aio_run_iocb(req, iocb->aio_lio_opcode, + ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode, (char __user *)(unsigned long)iocb->aio_buf, + iocb->aio_nbytes, compat); if (ret) goto out_put_req; @@ -1643,10 +1634,10 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, /* lookup_kiocb * Finds a given iocb for cancellation. */ -static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, - u32 key) +static struct aio_kiocb * +lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key) { - struct list_head *pos; + struct aio_kiocb *kiocb; assert_spin_locked(&ctx->ctx_lock); @@ -1654,9 +1645,8 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, return NULL; /* TODO: use a hash or array, this sucks. 
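The rewritten submission path above leans on the generic iovec import helpers instead of rw_copy_check_uvector() plus a hand-rolled single-vector case. A rough sketch of the caller pattern, mirroring the hunk with a hypothetical setup_iter() wrapper (compat and error paths trimmed):

/* Sketch: *iovec initially points at a caller-provided UIO_FASTIOV array. */
static int setup_iter(int rw, char __user *buf, size_t len, bool vectored,
                      struct iovec **iovec, struct iov_iter *iter)
{
        int ret;

        if (vectored)
                /* May replace *iovec with a kmalloc'd array (or NULL if the
                 * inline one sufficed), so a single kfree(*iovec) afterwards
                 * is always safe; returns 0 on success in this series. */
                return import_iovec(rw, (struct iovec __user *)buf, len,
                                    UIO_FASTIOV, iovec, iter);

        /* IOCB_CMD_PREAD/PWRITE: one flat user buffer of 'len' bytes. */
        ret = import_single_range(rw, buf, len, *iovec, iter);
        *iovec = NULL;                          /* nothing to free afterwards */
        return ret;
}

Either way the byte count is afterwards read back with iov_iter_count(&iter), which is what rw_verify_area() is handed in the hunk above.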
*/ - list_for_each(pos, &ctx->active_reqs) { - struct kiocb *kiocb = list_kiocb(pos); - if (kiocb->ki_obj.user == iocb) + list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { + if (kiocb->ki_user_iocb == iocb) return kiocb; } return NULL; @@ -1676,7 +1666,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result) { struct kioctx *ctx; - struct kiocb *kiocb; + struct aio_kiocb *kiocb; u32 key; int ret; diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 8e98cf9..5b700ef 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -213,7 +213,7 @@ void autofs4_clean_ino(struct autofs_info *); static inline int autofs_prepare_pipe(struct file *pipe) { - if (!pipe->f_op->write) + if (!(pipe->f_mode & FMODE_CAN_WRITE)) return -EINVAL; if (!S_ISFIFO(file_inode(pipe)->i_mode)) return -EINVAL; @@ -235,12 +235,12 @@ static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi) static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi) { - return sbi->sb->s_root->d_inode->i_ino; + return d_inode(sbi->sb->s_root)->i_ino; } static inline int simple_positive(struct dentry *dentry) { - return dentry->d_inode && !d_unhashed(dentry); + return d_really_is_positive(dentry) && !d_unhashed(dentry); } static inline void __autofs4_add_expiring(struct dentry *dentry) diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 11dd118..1cebc3c 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry, return NULL; } - if (dentry->d_inode && d_is_symlink(dentry)) { + if (d_really_is_positive(dentry) && d_is_symlink(dentry)) { DPRINTK("checking symlink %p %pd", dentry, dentry); /* * A symlink can't be "busy" in the usual sense so diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 1c55388..a3ae0b2 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -71,7 +71,7 @@ void autofs4_kill_sb(struct super_block *sb) static int autofs4_show_options(struct seq_file *m, struct dentry *root) { struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); - struct inode *root_inode = root->d_sb->s_root->d_inode; + struct inode *root_inode = d_inode(root->d_sb->s_root); if (!sbi) return 0; @@ -352,8 +352,8 @@ struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode) inode->i_mode = mode; if (sb->s_root) { - inode->i_uid = sb->s_root->d_inode->i_uid; - inode->i_gid = sb->s_root->d_inode->i_gid; + inode->i_uid = d_inode(sb->s_root)->i_uid; + inode->i_gid = d_inode(sb->s_root)->i_gid; } inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_ino = get_next_ino(); diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 7e44fdd..c6d7d3d 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -240,7 +240,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry, spin_lock(&expiring->d_lock); /* We've already been dentry_iput or unlinked */ - if (!expiring->d_inode) + if (d_really_is_negative(expiring)) goto next; qstr = &expiring->d_name; @@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path) * having d_mountpoint() true, so there's no need to call back * to the daemon. 
*/ - if (dentry->d_inode && d_is_symlink(dentry)) { + if (d_really_is_positive(dentry) && d_is_symlink(dentry)) { spin_unlock(&sbi->fs_lock); goto done; } @@ -459,7 +459,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk) return 0; if (d_mountpoint(dentry)) return 0; - inode = ACCESS_ONCE(dentry->d_inode); + inode = d_inode_rcu(dentry); if (inode && S_ISLNK(inode->i_mode)) return -EISDIR; if (list_empty(&dentry->d_subdirs)) @@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk) * an incorrect ELOOP error return. */ if ((!d_mountpoint(dentry) && !simple_empty(dentry)) || - (dentry->d_inode && d_is_symlink(dentry))) + (d_really_is_positive(dentry) && d_is_symlink(dentry))) status = -EISDIR; } spin_unlock(&sbi->fs_lock); @@ -625,8 +625,8 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) } dput(ino->dentry); - dentry->d_inode->i_size = 0; - clear_nlink(dentry->d_inode); + d_inode(dentry)->i_size = 0; + clear_nlink(d_inode(dentry)); dir->i_mtime = CURRENT_TIME; @@ -719,8 +719,8 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) atomic_dec(&p_ino->count); } dput(ino->dentry); - dentry->d_inode->i_size = 0; - clear_nlink(dentry->d_inode); + d_inode(dentry)->i_size = 0; + clear_nlink(d_inode(dentry)); if (dir->i_nlink) drop_nlink(dir); @@ -839,7 +839,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p) */ int is_autofs4_dentry(struct dentry *dentry) { - return dentry && dentry->d_inode && + return dentry && d_really_is_positive(dentry) && dentry->d_op == &autofs4_dentry_operations && dentry->d_fsdata != NULL; } diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c index 1e8ea19..de58cc7 100644 --- a/fs/autofs4/symlink.c +++ b/fs/autofs4/symlink.c @@ -18,7 +18,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) struct autofs_info *ino = autofs4_dentry_ino(dentry); if (ino && !autofs4_oz_mode(sbi)) ino->last_used = jiffies; - nd_set_link(nd, dentry->d_inode->i_private); + nd_set_link(nd, d_inode(dentry)->i_private); return NULL; } diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 116fd38..35b755e 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -70,7 +70,7 @@ static int autofs4_write(struct autofs_sb_info *sbi, mutex_lock(&sbi->pipe_mutex); while (bytes && - (wr = file->f_op->write(file,data,bytes,&file->f_pos)) > 0) { + (wr = __vfs_write(file,data,bytes,&file->f_pos)) > 0) { data += wr; bytes -= wr; } @@ -322,7 +322,7 @@ static int validate_request(struct autofs_wait_queue **wait, * continue on and create a new request. */ if (!IS_ROOT(dentry)) { - if (dentry->d_inode && d_unhashed(dentry)) { + if (d_really_is_positive(dentry) && d_unhashed(dentry)) { struct dentry *parent = dentry->d_parent; new = d_lookup(parent, &dentry->d_name); if (new) @@ -364,7 +364,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, if (pid == 0 || tgid == 0) return -ENOENT; - if (!dentry->d_inode) { + if (d_really_is_negative(dentry)) { /* * A wait for a negative dentry is invalid for certain * cases. 
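The autofs4 hunks above stop reaching for file->f_op->write directly; once new_sync_write() is gone, a pipe may only expose ->write_iter, so the daemon pipe is validated via FMODE_CAN_WRITE and written through __vfs_write(). As a hedged sketch of the assumption (FMODE_CAN_WRITE is set at open time when either ->write or ->write_iter exists), the validity check reduces to something like this hypothetical helper:

/* Hypothetical helper for illustration; mirrors autofs_prepare_pipe(). */
static bool autofs_pipe_usable(struct file *pipe)
{
        if (!(pipe->f_mode & FMODE_CAN_WRITE))  /* no usable write path at all */
                return false;
        return S_ISFIFO(file_inode(pipe)->i_mode);
}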
A direct or offset mount "always" has its mount diff --git a/fs/befs/befs.h b/fs/befs/befs.h index 3a7813a..1fead8d 100644 --- a/fs/befs/befs.h +++ b/fs/befs/befs.h @@ -19,16 +19,16 @@ typedef u64 befs_blocknr_t; * BeFS in memory structures */ -typedef struct befs_mount_options { +struct befs_mount_options { kgid_t gid; kuid_t uid; int use_gid; int use_uid; int debug; char *iocharset; -} befs_mount_options; +}; -typedef struct befs_sb_info { +struct befs_sb_info { u32 magic1; u32 block_size; u32 block_shift; @@ -52,12 +52,11 @@ typedef struct befs_sb_info { befs_inode_addr indices; u32 magic3; - befs_mount_options mount_opts; + struct befs_mount_options mount_opts; struct nls_table *nls; +}; -} befs_sb_info; - -typedef struct befs_inode_info { +struct befs_inode_info { u32 i_flags; u32 i_type; @@ -71,8 +70,7 @@ typedef struct befs_inode_info { } i_data; struct inode vfs_inode; - -} befs_inode_info; +}; enum befs_err { BEFS_OK, @@ -105,13 +103,13 @@ void befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *); /* Gets a pointer to the private portion of the super_block * structure from the public part */ -static inline befs_sb_info * +static inline struct befs_sb_info * BEFS_SB(const struct super_block *super) { - return (befs_sb_info *) super->s_fs_info; + return (struct befs_sb_info *) super->s_fs_info; } -static inline befs_inode_info * +static inline struct befs_inode_info * BEFS_I(const struct inode *inode) { return list_entry(inode, struct befs_inode_info, vfs_inode); diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c index 1e8e0b8..ebd5071 100644 --- a/fs/befs/datastream.c +++ b/fs/befs/datastream.c @@ -168,7 +168,7 @@ befs_count_blocks(struct super_block * sb, befs_data_stream * ds) befs_blocknr_t blocks; befs_blocknr_t datablocks; /* File data blocks */ befs_blocknr_t metablocks; /* FS metadata blocks */ - befs_sb_info *befs_sb = BEFS_SB(sb); + struct befs_sb_info *befs_sb = BEFS_SB(sb); befs_debug(sb, "---> %s", __func__); @@ -428,7 +428,7 @@ befs_find_brun_dblindirect(struct super_block *sb, struct buffer_head *indir_block; befs_block_run indir_run; befs_disk_inode_addr *iaddr_array = NULL; - befs_sb_info *befs_sb = BEFS_SB(sb); + struct befs_sb_info *befs_sb = BEFS_SB(sb); befs_blocknr_t indir_start_blk = data->max_indirect_range >> befs_sb->block_shift; diff --git a/fs/befs/io.c b/fs/befs/io.c index 0408a3d..7a5b4ec 100644 --- a/fs/befs/io.c +++ b/fs/befs/io.c @@ -28,7 +28,7 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr) { struct buffer_head *bh = NULL; befs_blocknr_t block = 0; - befs_sb_info *befs_sb = BEFS_SB(sb); + struct befs_sb_info *befs_sb = BEFS_SB(sb); befs_debug(sb, "---> Enter %s " "[%u, %hu, %hu]", __func__, iaddr.allocation_group, diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index e089f19..7943533 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -51,7 +51,7 @@ static int befs_nls2utf(struct super_block *sb, const char *in, int in_len, static void befs_put_super(struct super_block *); static int befs_remount(struct super_block *, int *, char *); static int befs_statfs(struct dentry *, struct kstatfs *); -static int parse_options(char *, befs_mount_options *); +static int parse_options(char *, struct befs_mount_options *); static const struct super_operations befs_sops = { .alloc_inode = befs_alloc_inode, /* allocate a new inode */ @@ -304,9 +304,8 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) { struct buffer_head *bh = NULL; befs_inode *raw_inode = NULL; - - befs_sb_info 
*befs_sb = BEFS_SB(sb); - befs_inode_info *befs_ino = NULL; + struct befs_sb_info *befs_sb = BEFS_SB(sb); + struct befs_inode_info *befs_ino = NULL; struct inode *inode; long ret = -EIO; @@ -472,7 +471,7 @@ static void * befs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct super_block *sb = dentry->d_sb; - befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); + struct befs_inode_info *befs_ino = BEFS_I(d_inode(dentry)); befs_data_stream *data = &befs_ino->i_data.ds; befs_off_t len = data->size; char *link; @@ -502,7 +501,8 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd) static void * befs_fast_follow_link(struct dentry *dentry, struct nameidata *nd) { - befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); + struct befs_inode_info *befs_ino = BEFS_I(d_inode(dentry)); + nd_set_link(nd, befs_ino->i_data.symlink); return NULL; } @@ -669,7 +669,7 @@ static const match_table_t befs_tokens = { }; static int -parse_options(char *options, befs_mount_options * opts) +parse_options(char *options, struct befs_mount_options *opts) { char *p; substring_t args[MAX_OPT_ARGS]; @@ -769,7 +769,7 @@ static int befs_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head *bh; - befs_sb_info *befs_sb; + struct befs_sb_info *befs_sb; befs_super_block *disk_sb; struct inode *root; long ret = -EINVAL; diff --git a/fs/befs/super.c b/fs/befs/super.c index ca40f82..aeafc4d 100644 --- a/fs/befs/super.c +++ b/fs/befs/super.c @@ -24,7 +24,7 @@ int befs_load_sb(struct super_block *sb, befs_super_block * disk_sb) { - befs_sb_info *befs_sb = BEFS_SB(sb); + struct befs_sb_info *befs_sb = BEFS_SB(sb); /* Check the byte order of the filesystem */ if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_LE) @@ -59,7 +59,7 @@ befs_load_sb(struct super_block *sb, befs_super_block * disk_sb) int befs_check_sb(struct super_block *sb) { - befs_sb_info *befs_sb = BEFS_SB(sb); + struct befs_sb_info *befs_sb = BEFS_SB(sb); /* Check magic headers of super block */ if ((befs_sb->magic1 != BEFS_SUPER_MAGIC1) diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index 08063ae..3ec6113 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -86,7 +86,7 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, inode = new_inode(s); if (!inode) - return -ENOSPC; + return -ENOMEM; mutex_lock(&info->bfs_lock); ino = find_first_zero_bit(info->si_imap, info->si_lasti + 1); if (ino > info->si_lasti) { @@ -153,7 +153,7 @@ static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry, static int bfs_link(struct dentry *old, struct inode *dir, struct dentry *new) { - struct inode *inode = old->d_inode; + struct inode *inode = d_inode(old); struct bfs_sb_info *info = BFS_SB(inode->i_sb); int err; @@ -176,7 +176,7 @@ static int bfs_link(struct dentry *old, struct inode *dir, static int bfs_unlink(struct inode *dir, struct dentry *dentry) { int error = -ENOENT; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct buffer_head *bh; struct bfs_dirent *de; struct bfs_sb_info *info = BFS_SB(inode->i_sb); @@ -216,7 +216,7 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry, int error = -ENOENT; old_bh = new_bh = NULL; - old_inode = old_dentry->d_inode; + old_inode = d_inode(old_dentry); if (S_ISDIR(old_inode->i_mode)) return -EINVAL; @@ -231,7 +231,7 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto end_rename; error = -EPERM; - new_inode = new_dentry->d_inode; + new_inode = d_inode(new_dentry); new_bh = 
bfs_find_entry(new_dir, new_dentry->d_name.name, new_dentry->d_name.len, &new_de); @@ -293,7 +293,7 @@ static int bfs_add_entry(struct inode *dir, const unsigned char *name, for (block = sblock; block <= eblock; block++) { bh = sb_bread(dir->i_sb, block); if (!bh) - return -ENOSPC; + return -EIO; for (off = 0; off < BFS_BSIZE; off += BFS_DIRENT_SIZE) { de = (struct bfs_dirent *)(bh->b_data + off); if (!de->ino) { diff --git a/fs/bfs/file.c b/fs/bfs/file.c index e7f88ac..97f1b51 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -23,9 +23,7 @@ const struct file_operations bfs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .splice_read = generic_file_splice_read, diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 90bc079..fdcb4d6 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -15,6 +15,7 @@ #include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/writeback.h> +#include <linux/uio.h> #include <asm/uaccess.h> #include "bfs.h" diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 995986b..241ef68 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -31,6 +31,7 @@ #include <linux/security.h> #include <linux/random.h> #include <linux/elf.h> +#include <linux/elf-randomize.h> #include <linux/utsname.h> #include <linux/coredump.h> #include <linux/sched.h> @@ -862,6 +863,7 @@ static int load_elf_binary(struct linux_binprm *bprm) i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { int elf_prot = 0, elf_flags; unsigned long k, vaddr; + unsigned long total_size = 0; if (elf_ppnt->p_type != PT_LOAD) continue; @@ -909,25 +911,20 @@ static int load_elf_binary(struct linux_binprm *bprm) * default mmap base, as well as whatever program they * might try to exec. This is because the brk will * follow the loader, and is not movable. */ -#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE - /* Memory randomization might have been switched off - * in runtime via sysctl or explicit setting of - * personality flags. - * If that is the case, retain the original non-zero - * load_bias value in order to establish proper - * non-randomized mappings. - */ + load_bias = ELF_ET_DYN_BASE - vaddr; if (current->flags & PF_RANDOMIZE) - load_bias = 0; - else - load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); -#else - load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); -#endif + load_bias += arch_mmap_rnd(); + load_bias = ELF_PAGESTART(load_bias); + total_size = total_mapping_size(elf_phdata, + loc->elf_ex.e_phnum); + if (!total_size) { + error = -EINVAL; + goto out_free_dentry; + } } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, - elf_prot, elf_flags, 0); + elf_prot, elf_flags, total_size); if (BAD_ADDR(error)) { retval = IS_ERR((void *)error) ? PTR_ERR((void*)error) : -EINVAL; @@ -1053,15 +1050,13 @@ static int load_elf_binary(struct linux_binprm *bprm) current->mm->end_data = end_data; current->mm->start_stack = bprm->p; -#ifdef arch_randomize_brk if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { current->mm->brk = current->mm->start_brk = arch_randomize_brk(current->mm); -#ifdef CONFIG_COMPAT_BRK +#ifdef compat_brk_randomized current->brk_randomized = 1; #endif } -#endif if (current->personality & MMAP_PAGE_ZERO) { /* Why this, you ask??? 
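The load_elf_binary() hunk above folds the old per-arch CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE dance into one generic computation. Restated as a hypothetical helper (the hunk keeps this inline), the ET_DYN load bias is chosen roughly as:

static unsigned long pick_et_dyn_bias(unsigned long first_vaddr)
{
        /* first_vaddr is the p_vaddr of the first PT_LOAD segment. */
        unsigned long load_bias = ELF_ET_DYN_BASE - first_vaddr;

        if (current->flags & PF_RANDOMIZE)      /* ASLR not disabled for this task */
                load_bias += arch_mmap_rnd();   /* per-arch mmap entropy helper */

        return ELF_PAGESTART(load_bias);        /* page-align the final bias */
}

elf_map() is also passed total_mapping_size() so the whole PT_LOAD span is reserved up front rather than segment by segment.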
Well SVr4 maps page 0 as read-only, diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 97aff28..78f005f 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> @@ -521,9 +522,8 @@ static int parse_command(const char __user *buffer, size_t count) static void entry_status(Node *e, char *page) { - char *dp; - char *status = "disabled"; - const char *flags = "flags: "; + char *dp = page; + const char *status = "disabled"; if (test_bit(Enabled, &e->flags)) status = "enabled"; @@ -533,12 +533,10 @@ static void entry_status(Node *e, char *page) return; } - sprintf(page, "%s\ninterpreter %s\n", status, e->interpreter); - dp = page + strlen(page); + dp += sprintf(dp, "%s\ninterpreter %s\n", status, e->interpreter); /* print the special flags */ - sprintf(dp, "%s", flags); - dp += strlen(flags); + dp += sprintf(dp, "flags: "); if (e->flags & MISC_FMT_PRESERVE_ARGV0) *dp++ = 'P'; if (e->flags & MISC_FMT_OPEN_BINARY) @@ -550,21 +548,11 @@ static void entry_status(Node *e, char *page) if (!test_bit(Magic, &e->flags)) { sprintf(dp, "extension .%s\n", e->magic); } else { - int i; - - sprintf(dp, "offset %i\nmagic ", e->offset); - dp = page + strlen(page); - for (i = 0; i < e->size; i++) { - sprintf(dp, "%02x", 0xff & (int) (e->magic[i])); - dp += 2; - } + dp += sprintf(dp, "offset %i\nmagic ", e->offset); + dp = bin2hex(dp, e->magic, e->size); if (e->mask) { - sprintf(dp, "\nmask "); - dp += 6; - for (i = 0; i < e->size; i++) { - sprintf(dp, "%02x", 0xff & (int) (e->mask[i])); - dp += 2; - } + dp += sprintf(dp, "\nmask "); + dp = bin2hex(dp, e->mask, e->size); } *dp++ = '\n'; *dp = '\0'; @@ -603,7 +591,7 @@ static void kill_node(Node *e) write_unlock(&entries_lock); if (dentry) { - drop_nlink(dentry->d_inode); + drop_nlink(d_inode(dentry)); d_drop(dentry); dput(dentry); simple_release_fs(&bm_mnt, &entry_count); @@ -650,11 +638,11 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer, case 3: /* Delete this handler. */ root = dget(file->f_path.dentry->d_sb->s_root); - mutex_lock(&root->d_inode->i_mutex); + mutex_lock(&d_inode(root)->i_mutex); kill_node(e); - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); dput(root); break; default: @@ -687,14 +675,14 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, return PTR_ERR(e); root = dget(sb->s_root); - mutex_lock(&root->d_inode->i_mutex); + mutex_lock(&d_inode(root)->i_mutex); dentry = lookup_one_len(e->name, root, strlen(e->name)); err = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out; err = -EEXIST; - if (dentry->d_inode) + if (d_really_is_positive(dentry)) goto out2; inode = bm_get_inode(sb, S_IFREG | 0644); @@ -723,7 +711,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, out2: dput(dentry); out: - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); dput(root); if (err) { @@ -766,12 +754,12 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer, case 3: /* Delete all handlers. 
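entry_status() above swaps its open-coded hex loops for bin2hex() from <linux/kernel.h> and keeps a running cursor by adding sprintf()'s return value. The idiom, sketched as a hypothetical helper:

/* Sketch: append "magic"/"mask" bytes as hex; returns the advanced cursor. */
static char *dump_magic(char *dp, const u8 *magic, const u8 *mask, int size)
{
        dp += sprintf(dp, "magic ");            /* sprintf() returns chars written */
        dp = bin2hex(dp, magic, size);          /* emits 2*size hex digits, no NUL */
        if (mask) {
                dp += sprintf(dp, "\nmask ");
                dp = bin2hex(dp, mask, size);
        }
        *dp++ = '\n';
        *dp = '\0';
        return dp;
}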
*/ root = dget(file->f_path.dentry->d_sb->s_root); - mutex_lock(&root->d_inode->i_mutex); + mutex_lock(&d_inode(root)->i_mutex); while (!list_empty(&entries)) kill_node(list_entry(entries.next, Node, list)); - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); dput(root); break; default: diff --git a/fs/block_dev.c b/fs/block_dev.c index 975266b..c7e4163 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -27,7 +27,6 @@ #include <linux/namei.h> #include <linux/log2.h> #include <linux/cleancache.h> -#include <linux/aio.h> #include <asm/uaccess.h> #include "internal.h" @@ -147,15 +146,14 @@ blkdev_get_block(struct inode *inode, sector_t iblock, } static ssize_t -blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter, - offset, blkdev_get_block, - NULL, NULL, 0); + return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset, + blkdev_get_block, NULL, NULL, + DIO_SKIP_DIO_COUNT); } int __sync_blockdev(struct block_device *bdev, int wait) @@ -1598,9 +1596,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; + struct inode *bd_inode = file->f_mapping->host; + loff_t size = i_size_read(bd_inode); struct blk_plug plug; ssize_t ret; + if (bdev_read_only(I_BDEV(bd_inode))) + return -EPERM; + + if (!iov_iter_count(from)) + return 0; + + if (iocb->ki_pos >= size) + return -ENOSPC; + + iov_iter_truncate(from, size - iocb->ki_pos); + blk_start_plug(&plug); ret = __generic_file_write_iter(iocb, from); if (ret > 0) { @@ -1660,8 +1671,6 @@ const struct file_operations def_blk_fops = { .open = blkdev_open, .release = blkdev_close, .llseek = block_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, .mmap = generic_file_mmap, @@ -1708,7 +1717,7 @@ struct block_device *lookup_bdev(const char *pathname) if (error) return ERR_PTR(error); - inode = path.dentry->d_inode; + inode = d_backing_inode(path.dentry); error = -ENOTBLK; if (!S_ISBLK(inode->i_mode)) goto fail; diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 37d1645..8d05220 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, static struct dentry *btrfs_get_parent(struct dentry *child) { - struct inode *dir = child->d_inode; + struct inode *dir = d_inode(child); struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; struct extent_buffer *leaf; @@ -220,8 +220,8 @@ fail: static int btrfs_get_name(struct dentry *parent, char *name, struct dentry *child) { - struct inode *inode = child->d_inode; - struct inode *dir = parent->d_inode; + struct inode *inode = d_inode(child); + struct inode *dir = d_inode(parent); struct btrfs_path *path; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_inode_ref *iref; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 23b6e03..b072e17 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -24,7 +24,6 @@ #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/mpage.h> -#include <linux/aio.h> #include <linux/falloc.h> #include <linux/swap.h> #include <linux/writeback.h> @@ -32,6 +31,7 @@ 
#include <linux/compat.h> #include <linux/slab.h> #include <linux/btrfs.h> +#include <linux/uio.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -1735,27 +1735,19 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, u64 start_pos; u64 end_pos; ssize_t num_written = 0; - ssize_t err = 0; - size_t count = iov_iter_count(from); bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host); - loff_t pos = iocb->ki_pos; + ssize_t err; + loff_t pos; + size_t count; mutex_lock(&inode->i_mutex); - - current->backing_dev_info = inode_to_bdi(inode); - err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); - if (err) { + err = generic_write_checks(iocb, from); + if (err <= 0) { mutex_unlock(&inode->i_mutex); - goto out; - } - - if (count == 0) { - mutex_unlock(&inode->i_mutex); - goto out; + return err; } - iov_iter_truncate(from, count); - + current->backing_dev_info = inode_to_bdi(inode); err = file_remove_suid(file); if (err) { mutex_unlock(&inode->i_mutex); @@ -1782,6 +1774,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, */ update_time_for_write(inode); + pos = iocb->ki_pos; + count = iov_iter_count(from); start_pos = round_down(pos, root->sectorsize); if (start_pos > i_size_read(inode)) { /* Expand hole size to cover write data, preventing empty gap */ @@ -1796,7 +1790,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, if (sync) atomic_inc(&BTRFS_I(inode)->sync_writers); - if (file->f_flags & O_DIRECT) { + if (iocb->ki_flags & IOCB_DIRECT) { num_written = __btrfs_direct_write(iocb, from, pos); } else { num_written = __btrfs_buffered_write(file, from, pos); @@ -1868,7 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end) int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; struct btrfs_log_ctx ctx; @@ -2807,8 +2801,6 @@ out: const struct file_operations btrfs_file_operations = { .llseek = btrfs_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .splice_read = generic_file_splice_read, .write_iter = btrfs_file_write_iter, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 27b59b83..ada4d24 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -32,7 +32,6 @@ #include <linux/writeback.h> #include <linux/statfs.h> #include <linux/compat.h> -#include <linux/aio.h> #include <linux/bit_spinlock.h> #include <linux/xattr.h> #include <linux/posix_acl.h> @@ -4021,16 +4020,16 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) { struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_trans_handle *trans; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int ret; trans = __unlink_start_trans(dir); if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); + btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0); - ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, + ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), dentry->d_name.name, dentry->d_name.len); if (ret) goto out; @@ -4129,7 +4128,7 @@ out: static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int err = 0; struct btrfs_root *root = 
BTRFS_I(dir)->root; struct btrfs_trans_handle *trans; @@ -4156,7 +4155,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) goto out; /* now the directory is empty */ - err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, + err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), dentry->d_name.name, dentry->d_name.len); if (!err) btrfs_i_size_write(inode, 0); @@ -4916,7 +4915,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct btrfs_root *root = BTRFS_I(inode)->root; int err; @@ -5548,10 +5547,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) static int btrfs_dentry_delete(const struct dentry *dentry) { struct btrfs_root *root; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (!inode && !IS_ROOT(dentry)) - inode = dentry->d_parent->d_inode; + inode = d_inode(dentry->d_parent); if (inode) { root = BTRFS_I(inode)->root; @@ -6358,7 +6357,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); u64 index; int err; int drop_inode = 0; @@ -8213,7 +8212,7 @@ free_ordered: bio_endio(dio_bio, ret); } -static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, +static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, const struct iov_iter *iter, loff_t offset) { int seg; @@ -8228,7 +8227,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io goto out; /* If this is a write we don't need to check anymore */ - if (rw & WRITE) + if (iov_iter_rw(iter) == WRITE) return 0; /* * Check to make sure we don't have duplicate iov_base's in this @@ -8246,8 +8245,8 @@ out: return retval; } -static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -8258,10 +8257,10 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, bool relock = false; ssize_t ret; - if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset)) + if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset)) return 0; - atomic_inc(&inode->i_dio_count); + inode_dio_begin(inode); smp_mb__after_atomic(); /* @@ -8276,7 +8275,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, filemap_fdatawrite_range(inode->i_mapping, offset, offset + count - 1); - if (rw & WRITE) { + if (iov_iter_rw(iter) == WRITE) { /* * If the write DIO is beyond the EOF, we need update * the isize, but it is protected by i_mutex. 
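With the int rw parameter dropped from ->direct_IO(), the transfer direction now travels inside the iov_iter itself, which is why the btrfs hunks above ask iov_iter_rw(iter) instead. A minimal sketch of that assumption, as a hypothetical helper:

/* Sketch: direction test after the ->direct_IO() signature change. */
static bool dio_is_write(struct iov_iter *iter)
{
        /* iov_iter_rw() reports the READ/WRITE direction recorded when the
         * iter was built (e.g. by import_iovec() or iov_iter_bvec()). */
        return iov_iter_rw(iter) == WRITE;
}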
So we can @@ -8301,16 +8300,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, current->journal_info = &outstanding_extents; } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags)) { - inode_dio_done(inode); + inode_dio_end(inode); flags = DIO_LOCKING | DIO_SKIP_HOLES; wakeup = false; } - ret = __blockdev_direct_IO(rw, iocb, inode, - BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, - iter, offset, btrfs_get_blocks_direct, NULL, - btrfs_submit_direct, flags); - if (rw & WRITE) { + ret = __blockdev_direct_IO(iocb, inode, + BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, + iter, offset, btrfs_get_blocks_direct, NULL, + btrfs_submit_direct, flags); + if (iov_iter_rw(iter) == WRITE) { current->journal_info = NULL; if (ret < 0 && ret != -EIOCBQUEUED) btrfs_delalloc_release_space(inode, count); @@ -8320,7 +8319,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, } out: if (wakeup) - inode_dio_done(inode); + inode_dio_end(inode); if (relock) mutex_lock(&inode->i_mutex); @@ -9007,7 +9006,7 @@ static int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { u64 delalloc_bytes; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); u32 blocksize = inode->i_sb->s_blocksize; generic_fillattr(inode, stat); @@ -9028,8 +9027,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(old_dir)->root; struct btrfs_root *dest = BTRFS_I(new_dir)->root; - struct inode *new_inode = new_dentry->d_inode; - struct inode *old_inode = old_dentry->d_inode; + struct inode *new_inode = d_inode(new_dentry); + struct inode *old_inode = d_inode(old_dentry); struct timespec ctime = CURRENT_TIME; u64 index = 0; u64 root_objectid; @@ -9141,7 +9140,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, old_dentry->d_name.len); } else { ret = __btrfs_unlink_inode(trans, root, old_dir, - old_dentry->d_inode, + d_inode(old_dentry), old_dentry->d_name.name, old_dentry->d_name.len); if (!ret) @@ -9165,12 +9164,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, BUG_ON(new_inode->i_nlink == 0); } else { ret = btrfs_unlink_inode(trans, dest, new_dir, - new_dentry->d_inode, + d_inode(new_dentry), new_dentry->d_name.name, new_dentry->d_name.len); } if (!ret && new_inode->i_nlink == 0) - ret = btrfs_orphan_add(trans, new_dentry->d_inode); + ret = btrfs_orphan_add(trans, d_inode(new_dentry)); if (ret) { btrfs_abort_transaction(trans, root, ret); goto out_fail; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ca5d968f..b05653f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -724,7 +724,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, if (ret) goto fail; - inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); + inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto fail; @@ -768,10 +768,10 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) { int error; - if (!victim->d_inode) + if (d_really_is_negative(victim)) return -ENOENT; - BUG_ON(victim->d_parent->d_inode != dir); + BUG_ON(d_inode(victim->d_parent) != dir); audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); error = inode_permission(dir, MAY_WRITE | MAY_EXEC); @@ -779,8 +779,8 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) return error; if 
(IS_APPEND(dir)) return -EPERM; - if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) || - IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) + if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) || + IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim))) return -EPERM; if (isdir) { if (!d_is_dir(victim)) @@ -799,7 +799,7 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) /* copy of may_create in fs/namei.c() */ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) { - if (child->d_inode) + if (d_really_is_positive(child)) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; @@ -817,7 +817,7 @@ static noinline int btrfs_mksubvol(struct path *parent, u64 *async_transid, bool readonly, struct btrfs_qgroup_inherit *inherit) { - struct inode *dir = parent->dentry->d_inode; + struct inode *dir = d_inode(parent->dentry); struct dentry *dentry; int error; @@ -831,7 +831,7 @@ static noinline int btrfs_mksubvol(struct path *parent, goto out_unlock; error = -EEXIST; - if (dentry->d_inode) + if (d_really_is_positive(dentry)) goto out_dput; error = btrfs_may_create(dir, dentry); @@ -2301,7 +2301,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, { struct dentry *parent = file->f_path.dentry; struct dentry *dentry; - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); struct inode *inode; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *dest = NULL; @@ -2340,12 +2340,12 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, goto out_unlock_dir; } - if (!dentry->d_inode) { + if (d_really_is_negative(dentry)) { err = -ENOENT; goto out_dput; } - inode = dentry->d_inode; + inode = d_inode(dentry); dest = BTRFS_I(inode)->root; if (!capable(CAP_SYS_ADMIN)) { /* diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f2c9f9d..9e66f5e 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -925,7 +925,7 @@ setup_root: * a reference to the dentry. We will have already gotten a reference * to the inode in btrfs_fill_super so we're good to go. 
*/ - if (!new && sb->s_root->d_inode == inode) { + if (!new && d_inode(sb->s_root) == inode) { iput(inode); return dget(sb->s_root); } @@ -1230,7 +1230,7 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags, root = mount_subtree(mnt, subvol_name); - if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) { + if (!IS_ERR(root) && !is_subvolume_inode(d_inode(root))) { struct super_block *s = root->d_sb; dput(root); root = ERR_PTR(-EINVAL); @@ -1895,8 +1895,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]); buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]); /* Mask in the root object ID too, to disambiguate subvols */ - buf->f_fsid.val[0] ^= BTRFS_I(dentry->d_inode)->root->objectid >> 32; - buf->f_fsid.val[1] ^= BTRFS_I(dentry->d_inode)->root->objectid; + buf->f_fsid.val[0] ^= BTRFS_I(d_inode(dentry))->root->objectid >> 32; + buf->f_fsid.val[1] ^= BTRFS_I(d_inode(dentry))->root->objectid; return 0; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a089b59..d049683 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4474,9 +4474,9 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, goto out; if (!S_ISDIR(inode->i_mode)) { - if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) + if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb) goto out; - inode = parent->d_inode; + inode = d_inode(parent); } while (1) { @@ -4502,7 +4502,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, break; } - if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) + if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb) break; if (IS_ROOT(parent)) @@ -4511,7 +4511,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, parent = dget_parent(parent); dput(old_parent); old_parent = parent; - inode = parent->d_inode; + inode = d_inode(parent); } dput(old_parent); @@ -4777,10 +4777,10 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, log_dentries = true; while (1) { - if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) + if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb) break; - inode = parent->d_inode; + inode = d_inode(parent); if (root != BTRFS_I(inode)->root) break; @@ -4845,7 +4845,7 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, struct dentry *parent = dget_parent(dentry); int ret; - ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, + ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent, start, end, 0, ctx); dput(parent); diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 45ea704..6f518c9 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -261,7 +261,7 @@ out: ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct btrfs_key key, found_key; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; @@ -411,13 +411,13 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, ret = btrfs_is_valid_xattr(name); if (ret) return ret; - return __btrfs_getxattr(dentry->d_inode, name, buffer, size); + return __btrfs_getxattr(d_inode(dentry), name, buffer, size); } int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, 
int flags) { - struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root; int ret; /* @@ -440,19 +440,19 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, return ret; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) - return btrfs_set_prop(dentry->d_inode, name, + return btrfs_set_prop(d_inode(dentry), name, value, size, flags); if (size == 0) value = ""; /* empty EA, do not remove */ - return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size, + return __btrfs_setxattr(NULL, d_inode(dentry), name, value, size, flags); } int btrfs_removexattr(struct dentry *dentry, const char *name) { - struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root; int ret; /* @@ -475,10 +475,10 @@ int btrfs_removexattr(struct dentry *dentry, const char *name) return ret; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) - return btrfs_set_prop(dentry->d_inode, name, + return btrfs_set_prop(d_inode(dentry), name, NULL, 0, XATTR_REPLACE); - return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0, + return __btrfs_setxattr(NULL, d_inode(dentry), name, NULL, 0, XATTR_REPLACE); } diff --git a/fs/buffer.c b/fs/buffer.c index 20805db..c7a5602 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3243,8 +3243,8 @@ int try_to_free_buffers(struct page *page) * to synchronise against __set_page_dirty_buffers and prevent the * dirty bit from being lost. */ - if (ret) - cancel_dirty_page(page, PAGE_CACHE_SIZE); + if (ret && TestClearPageDirty(page)) + account_page_cleaned(page, mapping); spin_unlock(&mapping->private_lock); out: if (buffers_to_free) { diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c index fbb08e9..6af790f 100644 --- a/fs/cachefiles/bind.c +++ b/fs/cachefiles/bind.c @@ -123,11 +123,11 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache) /* check parameters */ ret = -EOPNOTSUPP; - if (!root->d_inode || - !root->d_inode->i_op->lookup || - !root->d_inode->i_op->mkdir || - !root->d_inode->i_op->setxattr || - !root->d_inode->i_op->getxattr || + if (d_is_negative(root) || + !d_backing_inode(root)->i_op->lookup || + !d_backing_inode(root)->i_op->mkdir || + !d_backing_inode(root)->i_op->setxattr || + !d_backing_inode(root)->i_op->getxattr || !root->d_sb->s_op->statfs || !root->d_sb->s_op->sync_fs) goto error_unsupported; diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 2324262..afa023d 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -441,12 +441,12 @@ static int cachefiles_attr_changed(struct fscache_object *_object) fscache_set_store_limit(&object->fscache, ni_size); - oi_size = i_size_read(object->backer->d_inode); + oi_size = i_size_read(d_backing_inode(object->backer)); if (oi_size == ni_size) return 0; cachefiles_begin_secure(cache, &saved_cred); - mutex_lock(&object->backer->d_inode->i_mutex); + mutex_lock(&d_inode(object->backer)->i_mutex); /* if there's an extension to a partial page at the end of the backing * file, we need to discard the partial page so that we pick up new @@ -465,7 +465,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object) ret = notify_change(object->backer, &newattrs, NULL); truncate_failed: - mutex_unlock(&object->backer->d_inode->i_mutex); + mutex_unlock(&d_inode(object->backer)->i_mutex); cachefiles_end_secure(cache, saved_cred); if (ret == -EIO) { diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c 
index 1e51714e..ab857ab 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -286,13 +286,13 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache, if (ret < 0) { cachefiles_io_error(cache, "Unlink security error"); } else { - ret = vfs_unlink(dir->d_inode, rep, NULL); + ret = vfs_unlink(d_inode(dir), rep, NULL); if (preemptive) cachefiles_mark_object_buried(cache, rep); } - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); if (ret == -EIO) cachefiles_io_error(cache, "Unlink failed"); @@ -303,7 +303,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache, /* directories have to be moved to the graveyard */ _debug("move stale object to graveyard"); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); try_again: /* first step is to make up a grave dentry in the graveyard */ @@ -355,7 +355,7 @@ try_again: return -EIO; } - if (grave->d_inode) { + if (d_is_positive(grave)) { unlock_rename(cache->graveyard, dir); dput(grave); grave = NULL; @@ -387,8 +387,8 @@ try_again: if (ret < 0) { cachefiles_io_error(cache, "Rename security error %d", ret); } else { - ret = vfs_rename(dir->d_inode, rep, - cache->graveyard->d_inode, grave, NULL, 0); + ret = vfs_rename(d_inode(dir), rep, + d_inode(cache->graveyard), grave, NULL, 0); if (ret != 0 && ret != -ENOMEM) cachefiles_io_error(cache, "Rename failed with error %d", ret); @@ -415,18 +415,18 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry); ASSERT(object->dentry); - ASSERT(object->dentry->d_inode); + ASSERT(d_backing_inode(object->dentry)); ASSERT(object->dentry->d_parent); dir = dget_parent(object->dentry); - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { /* object allocation for the same key preemptively deleted this * object's file so that it could create its own file */ _debug("object preemptively buried"); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); ret = 0; } else { /* we need to check that our parent is _still_ our parent - it @@ -438,7 +438,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, /* it got moved, presumably by cachefilesd culling it, * so it's no longer in the key path and we can ignore * it */ - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); ret = 0; } } @@ -473,7 +473,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent, path.mnt = cache->mnt; ASSERT(parent->dentry); - ASSERT(parent->dentry->d_inode); + ASSERT(d_backing_inode(parent->dentry)); if (!(d_is_dir(parent->dentry))) { // TODO: convert file to dir @@ -497,7 +497,7 @@ lookup_again: /* search the current directory for the element name */ _debug("lookup '%s'", name); - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); start = jiffies; next = lookup_one_len(name, dir, nlen); @@ -505,21 +505,21 @@ lookup_again: if (IS_ERR(next)) goto lookup_error; - _debug("next -> %p %s", next, next->d_inode ? "positive" : "negative"); + _debug("next -> %p %s", next, d_backing_inode(next) ? 
"positive" : "negative"); if (!key) - object->new = !next->d_inode; + object->new = !d_backing_inode(next); /* if this element of the path doesn't exist, then the lookup phase * failed, and we can release any readers in the certain knowledge that * there's nothing for them to actually read */ - if (!next->d_inode) + if (d_is_negative(next)) fscache_object_lookup_negative(&object->fscache); /* we need to create the object if it's negative */ if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) { /* index objects and intervening tree levels must be subdirs */ - if (!next->d_inode) { + if (d_is_negative(next)) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto create_error; @@ -529,26 +529,26 @@ lookup_again: if (ret < 0) goto create_error; start = jiffies; - ret = vfs_mkdir(dir->d_inode, next, 0); + ret = vfs_mkdir(d_inode(dir), next, 0); cachefiles_hist(cachefiles_mkdir_histogram, start); if (ret < 0) goto create_error; - ASSERT(next->d_inode); + ASSERT(d_backing_inode(next)); _debug("mkdir -> %p{%p{ino=%lu}}", - next, next->d_inode, next->d_inode->i_ino); + next, d_backing_inode(next), d_backing_inode(next)->i_ino); } else if (!d_can_lookup(next)) { pr_err("inode %lu is not a directory\n", - next->d_inode->i_ino); + d_backing_inode(next)->i_ino); ret = -ENOBUFS; goto error; } } else { /* non-index objects start out life as files */ - if (!next->d_inode) { + if (d_is_negative(next)) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto create_error; @@ -558,21 +558,21 @@ lookup_again: if (ret < 0) goto create_error; start = jiffies; - ret = vfs_create(dir->d_inode, next, S_IFREG, true); + ret = vfs_create(d_inode(dir), next, S_IFREG, true); cachefiles_hist(cachefiles_create_histogram, start); if (ret < 0) goto create_error; - ASSERT(next->d_inode); + ASSERT(d_backing_inode(next)); _debug("create -> %p{%p{ino=%lu}}", - next, next->d_inode, next->d_inode->i_ino); + next, d_backing_inode(next), d_backing_inode(next)->i_ino); } else if (!d_can_lookup(next) && !d_is_reg(next) ) { pr_err("inode %lu is not a file or directory\n", - next->d_inode->i_ino); + d_backing_inode(next)->i_ino); ret = -ENOBUFS; goto error; } @@ -581,7 +581,7 @@ lookup_again: /* process the next component */ if (key) { _debug("advance"); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); dput(dir); dir = next; next = NULL; @@ -617,7 +617,7 @@ lookup_again: /* note that we're now using this object */ ret = cachefiles_mark_object_active(cache, object); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); dput(dir); dir = NULL; @@ -646,7 +646,7 @@ lookup_again: const struct address_space_operations *aops; ret = -EPERM; - aops = object->dentry->d_inode->i_mapping->a_ops; + aops = d_backing_inode(object->dentry)->i_mapping->a_ops; if (!aops->bmap) goto check_error; @@ -659,7 +659,7 @@ lookup_again: object->new = 0; fscache_obtained_object(&object->fscache); - _leave(" = 0 [%lu]", object->dentry->d_inode->i_ino); + _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino); return 0; create_error: @@ -695,7 +695,7 @@ lookup_error: cachefiles_io_error(cache, "Lookup failed"); next = NULL; error: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); dput(next); error_out2: dput(dir); @@ -719,7 +719,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, _enter(",,%s", dirname); /* search the current directory for the element name */ - mutex_lock(&dir->d_inode->i_mutex); + mutex_lock(&d_inode(dir)->i_mutex); 
start = jiffies; subdir = lookup_one_len(dirname, dir, strlen(dirname)); @@ -731,10 +731,10 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, } _debug("subdir -> %p %s", - subdir, subdir->d_inode ? "positive" : "negative"); + subdir, d_backing_inode(subdir) ? "positive" : "negative"); /* we need to create the subdir if it doesn't exist yet */ - if (!subdir->d_inode) { + if (d_is_negative(subdir)) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto mkdir_error; @@ -746,22 +746,22 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, ret = security_path_mkdir(&path, subdir, 0700); if (ret < 0) goto mkdir_error; - ret = vfs_mkdir(dir->d_inode, subdir, 0700); + ret = vfs_mkdir(d_inode(dir), subdir, 0700); if (ret < 0) goto mkdir_error; - ASSERT(subdir->d_inode); + ASSERT(d_backing_inode(subdir)); _debug("mkdir -> %p{%p{ino=%lu}}", subdir, - subdir->d_inode, - subdir->d_inode->i_ino); + d_backing_inode(subdir), + d_backing_inode(subdir)->i_ino); } - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); /* we need to make sure the subdir is a directory */ - ASSERT(subdir->d_inode); + ASSERT(d_backing_inode(subdir)); if (!d_can_lookup(subdir)) { pr_err("%s is not a directory\n", dirname); @@ -770,18 +770,18 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, } ret = -EPERM; - if (!subdir->d_inode->i_op->setxattr || - !subdir->d_inode->i_op->getxattr || - !subdir->d_inode->i_op->lookup || - !subdir->d_inode->i_op->mkdir || - !subdir->d_inode->i_op->create || - (!subdir->d_inode->i_op->rename && - !subdir->d_inode->i_op->rename2) || - !subdir->d_inode->i_op->rmdir || - !subdir->d_inode->i_op->unlink) + if (!d_backing_inode(subdir)->i_op->setxattr || + !d_backing_inode(subdir)->i_op->getxattr || + !d_backing_inode(subdir)->i_op->lookup || + !d_backing_inode(subdir)->i_op->mkdir || + !d_backing_inode(subdir)->i_op->create || + (!d_backing_inode(subdir)->i_op->rename && + !d_backing_inode(subdir)->i_op->rename2) || + !d_backing_inode(subdir)->i_op->rmdir || + !d_backing_inode(subdir)->i_op->unlink) goto check_error; - _leave(" = [%lu]", subdir->d_inode->i_ino); + _leave(" = [%lu]", d_backing_inode(subdir)->i_ino); return subdir; check_error: @@ -790,19 +790,19 @@ check_error: return ERR_PTR(ret); mkdir_error: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); dput(subdir); pr_err("mkdir %s failed with error %d\n", dirname, ret); return ERR_PTR(ret); lookup_error: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); ret = PTR_ERR(subdir); pr_err("Lookup %s failed with error %d\n", dirname, ret); return ERR_PTR(ret); nomem_d_alloc: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } @@ -827,7 +827,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, // dir, filename); /* look up the victim */ - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); start = jiffies; victim = lookup_one_len(filename, dir, strlen(filename)); @@ -836,13 +836,13 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, goto lookup_error; //_debug("victim -> %p %s", - // victim, victim->d_inode ? "positive" : "negative"); + // victim, d_backing_inode(victim) ? 
"positive" : "negative"); /* if the object is no longer there then we probably retired the object * at the netfs's request whilst the cull was in progress */ - if (!victim->d_inode) { - mutex_unlock(&dir->d_inode->i_mutex); + if (d_is_negative(victim)) { + mutex_unlock(&d_inode(dir)->i_mutex); dput(victim); _leave(" = -ENOENT [absent]"); return ERR_PTR(-ENOENT); @@ -871,13 +871,13 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, object_in_use: read_unlock(&cache->active_lock); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); dput(victim); //_leave(" = -EBUSY [in use]"); return ERR_PTR(-EBUSY); lookup_error: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); ret = PTR_ERR(victim); if (ret == -ENOENT) { /* file or dir now absent - probably retired by netfs */ @@ -913,7 +913,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, return PTR_ERR(victim); _debug("victim -> %p %s", - victim, victim->d_inode ? "positive" : "negative"); + victim, d_backing_inode(victim) ? "positive" : "negative"); /* okay... the victim is not being used so we can cull it * - start by marking it as stale @@ -936,7 +936,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, return 0; error_unlock: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); error: dput(victim); if (ret == -ENOENT) { @@ -971,7 +971,7 @@ int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir, if (IS_ERR(victim)) return PTR_ERR(victim); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); dput(victim); //_leave(" = 0"); return 0; diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index c6cd8d7..3cbb0e8 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -74,12 +74,12 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, static int cachefiles_read_reissue(struct cachefiles_object *object, struct cachefiles_one_read *monitor) { - struct address_space *bmapping = object->backer->d_inode->i_mapping; + struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping; struct page *backpage = monitor->back_page, *backpage2; int ret; _enter("{ino=%lx},{%lx,%lx}", - object->backer->d_inode->i_ino, + d_backing_inode(object->backer)->i_ino, backpage->index, backpage->flags); /* skip if the page was truncated away completely */ @@ -157,7 +157,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op) object = container_of(op->op.object, struct cachefiles_object, fscache); - _enter("{ino=%lu}", object->backer->d_inode->i_ino); + _enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino); max = 8; spin_lock_irq(&object->work_lock); @@ -247,7 +247,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object, init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter); /* attempt to get hold of the backing page */ - bmapping = object->backer->d_inode->i_mapping; + bmapping = d_backing_inode(object->backer)->i_mapping; newpage = NULL; for (;;) { @@ -408,7 +408,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, if (!object->backer) goto enobufs; - inode = object->backer->d_inode; + inode = d_backing_inode(object->backer); ASSERT(S_ISREG(inode->i_mode)); ASSERT(inode->i_mapping->a_ops->bmap); ASSERT(inode->i_mapping->a_ops->readpages); @@ -468,7 +468,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, struct list_head 
*list) { struct cachefiles_one_read *monitor = NULL; - struct address_space *bmapping = object->backer->d_inode->i_mapping; + struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping; struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; int ret = 0; @@ -705,7 +705,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, if (cachefiles_has_space(cache, 0, *nr_pages) < 0) space = 0; - inode = object->backer->d_inode; + inode = d_backing_inode(object->backer); ASSERT(S_ISREG(inode->i_mode)); ASSERT(inode->i_mapping->a_ops->bmap); ASSERT(inode->i_mapping->a_ops->readpages); diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c index 396c18e..31bbc05 100644 --- a/fs/cachefiles/security.c +++ b/fs/cachefiles/security.c @@ -55,14 +55,14 @@ static int cachefiles_check_cache_dir(struct cachefiles_cache *cache, { int ret; - ret = security_inode_mkdir(root->d_inode, root, 0); + ret = security_inode_mkdir(d_backing_inode(root), root, 0); if (ret < 0) { pr_err("Security denies permission to make dirs: error %d", ret); return ret; } - ret = security_inode_create(root->d_inode, root, 0); + ret = security_inode_create(d_backing_inode(root), root, 0); if (ret < 0) pr_err("Security denies permission to create files: error %d", ret); @@ -95,7 +95,7 @@ int cachefiles_determine_cache_security(struct cachefiles_cache *cache, /* use the cache root dir's security context as the basis with * which create files */ - ret = set_create_files_as(new, root->d_inode); + ret = set_create_files_as(new, d_backing_inode(root)); if (ret < 0) { abort_creds(new); cachefiles_begin_secure(cache, _saved_cred); diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index a8a6874..d31c1a72 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c @@ -33,7 +33,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object) int ret; ASSERT(dentry); - ASSERT(dentry->d_inode); + ASSERT(d_backing_inode(dentry)); if (!object->fscache.cookie) strcpy(type, "C3"); @@ -52,7 +52,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object) if (ret != -EEXIST) { pr_err("Can't set xattr on %pd [%lu] (err %d)\n", - dentry, dentry->d_inode->i_ino, + dentry, d_backing_inode(dentry)->i_ino, -ret); goto error; } @@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object) goto bad_type_length; pr_err("Can't read xattr on %pd [%lu] (err %d)\n", - dentry, dentry->d_inode->i_ino, + dentry, d_backing_inode(dentry)->i_ino, -ret); goto error; } @@ -84,14 +84,14 @@ error: bad_type_length: pr_err("Cache object %lu type xattr length incorrect\n", - dentry->d_inode->i_ino); + d_backing_inode(dentry)->i_ino); ret = -EIO; goto error; bad_type: xtype[2] = 0; pr_err("Cache object %pd [%lu] type %s not %s\n", - dentry, dentry->d_inode->i_ino, + dentry, d_backing_inode(dentry)->i_ino, xtype, type); ret = -EIO; goto error; @@ -165,7 +165,7 @@ int cachefiles_check_auxdata(struct cachefiles_object *object) int ret; ASSERT(dentry); - ASSERT(dentry->d_inode); + ASSERT(d_backing_inode(dentry)); ASSERT(object->fscache.cookie->def->check_aux); auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL); @@ -204,7 +204,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object, _enter("%p,#%d", object, auxdata->len); ASSERT(dentry); - ASSERT(dentry->d_inode); + ASSERT(d_backing_inode(dentry)); auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, cachefiles_gfp); if (!auxbuf) { @@ -225,7 +225,7 @@ int cachefiles_check_object_xattr(struct 
cachefiles_object *object, cachefiles_io_error_obj(object, "Can't read xattr on %lu (err %d)", - dentry->d_inode->i_ino, -ret); + d_backing_inode(dentry)->i_ino, -ret); goto error; } @@ -276,7 +276,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object, cachefiles_io_error_obj(object, "Can't update xattr on %lu" " (error %d)", - dentry->d_inode->i_ino, -ret); + d_backing_inode(dentry)->i_ino, -ret); goto error; } } @@ -291,7 +291,7 @@ error: bad_type_length: pr_err("Cache object %lu xattr length incorrect\n", - dentry->d_inode->i_ino); + d_backing_inode(dentry)->i_ino); ret = -EIO; goto error; @@ -316,7 +316,7 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, cachefiles_io_error(cache, "Can't remove xattr from %lu" " (error %d)", - dentry->d_inode->i_ino, -ret); + d_backing_inode(dentry)->i_ino, -ret); } _leave(" = %d", ret); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index fd5599d..e162bcd 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1146,6 +1146,10 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, inode, page, (int)pos, (int)len); r = ceph_update_writeable_page(file, pos, len, page); + if (r < 0) + page_cache_release(page); + else + *pagep = page; } while (r == -EAGAIN); return r; @@ -1198,8 +1202,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, * intercept O_DIRECT reads and writes early, this function should * never get called. */ -static ssize_t ceph_direct_io(int rw, struct kiocb *iocb, - struct iov_iter *iter, +static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) { WARN_ON(1); @@ -1535,19 +1538,27 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); - err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, - "inline_version", &inline_version, - sizeof(inline_version), - CEPH_OSD_CMPXATTR_OP_GT, - CEPH_OSD_CMPXATTR_MODE_U64); - if (err) - goto out_put; - - err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, - "inline_version", &inline_version, - sizeof(inline_version), 0, 0); - if (err) - goto out_put; + { + __le64 xattr_buf = cpu_to_le64(inline_version); + err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, + "inline_version", &xattr_buf, + sizeof(xattr_buf), + CEPH_OSD_CMPXATTR_OP_GT, + CEPH_OSD_CMPXATTR_MODE_U64); + if (err) + goto out_put; + } + + { + char xattr_buf[32]; + int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf), + "%llu", inline_version); + err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, + "inline_version", + xattr_buf, xattr_len, 0, 0); + if (err) + goto out_put; + } ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime); err = ceph_osdc_start_request(&fsc->client->osdc, req, false); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 8172775..be5ea6a 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -896,6 +896,18 @@ int ceph_is_any_caps(struct inode *inode) return ret; } +static void drop_inode_snap_realm(struct ceph_inode_info *ci) +{ + struct ceph_snap_realm *realm = ci->i_snap_realm; + spin_lock(&realm->inodes_with_caps_lock); + list_del_init(&ci->i_snap_realm_item); + ci->i_snap_realm_counter++; + ci->i_snap_realm = NULL; + spin_unlock(&realm->inodes_with_caps_lock); + ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, + realm); +} + /* * Remove a cap. Take steps to deal with a racing iterate_session_caps. 
* @@ -946,15 +958,13 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) if (removed) ceph_put_cap(mdsc, cap); - if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) { - struct ceph_snap_realm *realm = ci->i_snap_realm; - spin_lock(&realm->inodes_with_caps_lock); - list_del_init(&ci->i_snap_realm_item); - ci->i_snap_realm_counter++; - ci->i_snap_realm = NULL; - spin_unlock(&realm->inodes_with_caps_lock); - ceph_put_snap_realm(mdsc, realm); - } + /* when reconnect denied, we remove session caps forcibly, + * i_wr_ref can be non-zero. If there are ongoing write, + * keep i_snap_realm. + */ + if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm) + drop_inode_snap_realm(ci); + if (!__ceph_is_any_real_caps(ci)) __cap_delay_cancel(mdsc, ci); } @@ -1394,6 +1404,13 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) int was = ci->i_dirty_caps; int dirty = 0; + if (!ci->i_auth_cap) { + pr_warn("__mark_dirty_caps %p %llx mask %s, " + "but no auth cap (session was closed?)\n", + inode, ceph_ino(inode), ceph_cap_string(mask)); + return 0; + } + dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, ceph_cap_string(mask), ceph_cap_string(was), ceph_cap_string(was | mask)); @@ -1404,7 +1421,6 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) ci->i_snap_realm->cached_context); dout(" inode %p now dirty snapc %p auth cap %p\n", &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); - WARN_ON(!ci->i_auth_cap); BUG_ON(!list_empty(&ci->i_dirty_item)); spin_lock(&mdsc->cap_dirty_lock); list_add(&ci->i_dirty_item, &mdsc->cap_dirty); @@ -1545,7 +1561,19 @@ retry_locked: if (!mdsc->stopping && inode->i_nlink > 0) { if (want) { retain |= CEPH_CAP_ANY; /* be greedy */ + } else if (S_ISDIR(inode->i_mode) && + (issued & CEPH_CAP_FILE_SHARED) && + __ceph_dir_is_complete(ci)) { + /* + * If a directory is complete, we want to keep + * the exclusive cap. So that MDS does not end up + * revoking the shared cap on every create/unlink + * operation. + */ + want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL; + retain |= want; } else { + retain |= CEPH_CAP_ANY_SHARED; /* * keep RD only if we didn't have the file open RW, @@ -2309,6 +2337,9 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) wake = 1; } } + /* see comment in __ceph_remove_cap() */ + if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) + drop_inode_snap_realm(ci); } spin_unlock(&ci->i_ceph_lock); @@ -3391,7 +3422,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode, int ceph_encode_dentry_release(void **p, struct dentry *dentry, int mds, int drop, int unless) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); struct ceph_mds_request_release *rel = *p; struct ceph_dentry_info *di = ceph_dentry(dentry); int force = 0; diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 1b23551..31f8314 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -84,7 +84,7 @@ static int mdsc_show(struct seq_file *s, void *p) path = NULL; spin_lock(&req->r_dentry->d_lock); seq_printf(s, " #%llx/%pd (%s)", - ceph_ino(req->r_dentry->d_parent->d_inode), + ceph_ino(d_inode(req->r_dentry->d_parent)), req->r_dentry, path ? 
path : ""); spin_unlock(&req->r_dentry->d_lock); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 83e9976..4248307 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -49,9 +49,9 @@ int ceph_init_dentry(struct dentry *dentry) goto out_unlock; } - if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) + if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) d_set_d_op(dentry, &ceph_dentry_ops); - else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) + else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR) d_set_d_op(dentry, &ceph_snapdir_dentry_ops); else d_set_d_op(dentry, &ceph_snap_dentry_ops); @@ -77,7 +77,7 @@ struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry) spin_lock(&dentry->d_lock); if (!IS_ROOT(dentry)) { - inode = dentry->d_parent->d_inode; + inode = d_inode(dentry->d_parent); ihold(inode); } spin_unlock(&dentry->d_lock); @@ -122,7 +122,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, { struct ceph_file_info *fi = file->private_data; struct dentry *parent = file->f_path.dentry; - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); struct list_head *p; struct dentry *dentry, *last; struct ceph_dentry_info *di; @@ -161,15 +161,15 @@ more: } spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); if (di->lease_shared_gen == shared_gen && - !d_unhashed(dentry) && dentry->d_inode && - ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && - ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && + !d_unhashed(dentry) && d_really_is_positive(dentry) && + ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR && + ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH && fpos_cmp(ctx->pos, di->offset) <= 0) break; dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry, dentry, di->offset, ctx->pos, d_unhashed(dentry) ? " unhashed" : "", - !dentry->d_inode ? " null" : ""); + !d_inode(dentry) ? " null" : ""); spin_unlock(&dentry->d_lock); p = p->prev; dentry = list_entry(p, struct dentry, d_child); @@ -189,11 +189,11 @@ more: } dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos, - dentry, dentry, dentry->d_inode); + dentry, dentry, d_inode(dentry)); if (!dir_emit(ctx, dentry->d_name.name, dentry->d_name.len, - ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino), - dentry->d_inode->i_mode >> 12)) { + ceph_translate_ino(dentry->d_sb, d_inode(dentry)->i_ino), + d_inode(dentry)->i_mode >> 12)) { if (last) { /* remember our position */ fi->dentry = last; @@ -281,6 +281,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) /* can we use the dcache? 
*/ spin_lock(&ci->i_ceph_lock); if ((ctx->pos == 2 || fi->dentry) && + ceph_test_mount_opt(fsc, DCACHE) && !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && ceph_snap(inode) != CEPH_SNAPDIR && __ceph_dir_is_complete_ordered(ci) && @@ -336,16 +337,23 @@ more: ceph_mdsc_put_request(req); return err; } - req->r_inode = inode; - ihold(inode); - req->r_dentry = dget(file->f_path.dentry); /* hints to request -> mds selection code */ req->r_direct_mode = USE_AUTH_MDS; req->r_direct_hash = ceph_frag_value(frag); req->r_direct_is_hash = true; - req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); + if (fi->last_name) { + req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); + if (!req->r_path2) { + ceph_mdsc_put_request(req); + return -ENOMEM; + } + } req->r_readdir_offset = fi->next_offset; req->r_args.readdir.frag = cpu_to_le32(frag); + + req->r_inode = inode; + ihold(inode); + req->r_dentry = dget(file->f_path.dentry); err = ceph_mdsc_do_request(mdsc, NULL, req); if (err < 0) { ceph_mdsc_put_request(req); @@ -535,7 +543,7 @@ int ceph_handle_snapdir(struct ceph_mds_request *req, struct dentry *dentry, int err) { struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); - struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */ + struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */ /* .snap dir? */ if (err == -ENOENT && @@ -571,8 +579,8 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, err = 0; if (!req->r_reply_info.head->is_dentry) { dout("ENOENT and no trace, dentry %p inode %p\n", - dentry, dentry->d_inode); - if (dentry->d_inode) { + dentry, d_inode(dentry)); + if (d_really_is_positive(dentry)) { d_drop(dentry); err = -ENOENT; } else { @@ -619,7 +627,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(err); /* can we conclude ENOENT locally? 
*/ - if (dentry->d_inode == NULL) { + if (d_really_is_negative(dentry)) { struct ceph_inode_info *ci = ceph_inode(dir); struct ceph_dentry_info *di = ceph_dentry(dentry); @@ -629,6 +637,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, fsc->mount_options->snapdir_name, dentry->d_name.len) && !is_root_ceph_dentry(dir, dentry) && + ceph_test_mount_opt(fsc, DCACHE) && __ceph_dir_is_complete(ci) && (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { spin_unlock(&ci->i_ceph_lock); @@ -725,7 +734,7 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry, ceph_mdsc_put_request(req); out: if (!err) - ceph_init_inode_acls(dentry->d_inode, &acls); + ceph_init_inode_acls(d_inode(dentry), &acls); else d_drop(dentry); ceph_release_acls_info(&acls); @@ -755,10 +764,15 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, err = PTR_ERR(req); goto out; } - req->r_dentry = dget(dentry); - req->r_num_caps = 2; req->r_path2 = kstrdup(dest, GFP_NOFS); + if (!req->r_path2) { + err = -ENOMEM; + ceph_mdsc_put_request(req); + goto out; + } req->r_locked_dir = dir; + req->r_dentry = dget(dentry); + req->r_num_caps = 2; req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; err = ceph_mdsc_do_request(mdsc, dir, req); @@ -821,7 +835,7 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) ceph_mdsc_put_request(req); out: if (!err) - ceph_init_inode_acls(dentry->d_inode, &acls); + ceph_init_inode_acls(d_inode(dentry), &acls); else d_drop(dentry); ceph_release_acls_info(&acls); @@ -858,8 +872,8 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir, if (err) { d_drop(dentry); } else if (!req->r_reply_info.head->is_dentry) { - ihold(old_dentry->d_inode); - d_instantiate(dentry, old_dentry->d_inode); + ihold(d_inode(old_dentry)); + d_instantiate(dentry, d_inode(old_dentry)); } ceph_mdsc_put_request(req); return err; @@ -892,7 +906,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry) { struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_mds_request *req; int err = -EROFS; int op; @@ -933,16 +947,20 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; + int op = CEPH_MDS_OP_RENAME; int err; if (ceph_snap(old_dir) != ceph_snap(new_dir)) return -EXDEV; - if (ceph_snap(old_dir) != CEPH_NOSNAP || - ceph_snap(new_dir) != CEPH_NOSNAP) - return -EROFS; + if (ceph_snap(old_dir) != CEPH_NOSNAP) { + if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR) + op = CEPH_MDS_OP_RENAMESNAP; + else + return -EROFS; + } dout("rename dir %p dentry %p to dir %p dentry %p\n", old_dir, old_dentry, new_dir, new_dentry); - req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS); + req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); ihold(old_dir); @@ -957,8 +975,8 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, req->r_dentry_unless = CEPH_CAP_FILE_EXCL; /* release LINK_RDCACHE on source inode (mds will lock it) */ req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; - if (new_dentry->d_inode) - req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode); + if (d_really_is_positive(new_dentry)) + req->r_inode_drop = 
drop_caps_for_unlink(d_inode(new_dentry)); err = ceph_mdsc_do_request(mdsc, old_dir, req); if (!err && !req->r_reply_info.head->is_dentry) { /* @@ -1024,7 +1042,7 @@ static int dentry_lease_is_valid(struct dentry *dentry) if (di->lease_renew_after && time_after(jiffies, di->lease_renew_after)) { /* we should renew */ - dir = dentry->d_parent->d_inode; + dir = d_inode(dentry->d_parent); session = ceph_get_mds_session(s); seq = di->lease_seq; di->lease_renew_after = 0; @@ -1074,22 +1092,22 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) return -ECHILD; dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry, - dentry, dentry->d_inode, ceph_dentry(dentry)->offset); + dentry, d_inode(dentry), ceph_dentry(dentry)->offset); dir = ceph_get_dentry_parent_inode(dentry); /* always trust cached snapped dentries, snapdir dentry */ if (ceph_snap(dir) != CEPH_NOSNAP) { dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry, - dentry, dentry->d_inode); + dentry, d_inode(dentry)); valid = 1; - } else if (dentry->d_inode && - ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) { + } else if (d_really_is_positive(dentry) && + ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) { valid = 1; } else if (dentry_lease_is_valid(dentry) || dir_lease_is_valid(dir, dentry)) { - if (dentry->d_inode) - valid = ceph_is_any_caps(dentry->d_inode); + if (d_really_is_positive(dentry)) + valid = ceph_is_any_caps(d_inode(dentry)); else valid = 1; } @@ -1151,7 +1169,7 @@ static void ceph_d_prune(struct dentry *dentry) * we hold d_lock, so d_parent is stable, and d_fsdata is never * cleared until d_release */ - ceph_dir_clear_complete(dentry->d_parent->d_inode); + ceph_dir_clear_complete(d_inode(dentry->d_parent)); } /* @@ -1240,11 +1258,12 @@ static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end, dout("dir_fsync %p wait on tid %llu (until %llu)\n", inode, req->r_tid, last_tid); if (req->r_timeout) { - ret = wait_for_completion_timeout( - &req->r_safe_completion, req->r_timeout); - if (ret > 0) + unsigned long time_left = wait_for_completion_timeout( + &req->r_safe_completion, + req->r_timeout); + if (time_left > 0) ret = 0; - else if (ret == 0) + else ret = -EIO; /* timed out */ } else { wait_for_completion(&req->r_safe_completion); @@ -1372,6 +1391,7 @@ const struct inode_operations ceph_snapdir_iops = { .getattr = ceph_getattr, .mkdir = ceph_mkdir, .rmdir = ceph_unlink, + .rename = ceph_rename, }; const struct dentry_operations ceph_dentry_ops = { diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 8d7d782..fe02ae7 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -136,8 +136,8 @@ static struct dentry *__get_parent(struct super_block *sb, return ERR_CAST(req); if (child) { - req->r_inode = child->d_inode; - ihold(child->d_inode); + req->r_inode = d_inode(child); + ihold(d_inode(child)); } else { req->r_ino1 = (struct ceph_vino) { .ino = ino, @@ -164,7 +164,7 @@ static struct dentry *__get_parent(struct super_block *sb, return ERR_PTR(err); } dout("__get_parent ino %llx parent %p ino %llx.%llx\n", - child ? ceph_ino(child->d_inode) : ino, + child ? 
ceph_ino(d_inode(child)) : ino, dentry, ceph_vinop(inode)); return dentry; } @@ -172,11 +172,11 @@ static struct dentry *__get_parent(struct super_block *sb, static struct dentry *ceph_get_parent(struct dentry *child) { /* don't re-export snaps */ - if (ceph_snap(child->d_inode) != CEPH_NOSNAP) + if (ceph_snap(d_inode(child)) != CEPH_NOSNAP) return ERR_PTR(-EINVAL); dout("get_parent %p ino %llx.%llx\n", - child, ceph_vinop(child->d_inode)); + child, ceph_vinop(d_inode(child))); return __get_parent(child->d_sb, child, 0); } @@ -209,32 +209,32 @@ static int ceph_get_name(struct dentry *parent, char *name, struct ceph_mds_request *req; int err; - mdsc = ceph_inode_to_client(child->d_inode)->mdsc; + mdsc = ceph_inode_to_client(d_inode(child))->mdsc; req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME, USE_ANY_MDS); if (IS_ERR(req)) return PTR_ERR(req); - mutex_lock(&parent->d_inode->i_mutex); + mutex_lock(&d_inode(parent)->i_mutex); - req->r_inode = child->d_inode; - ihold(child->d_inode); - req->r_ino2 = ceph_vino(parent->d_inode); - req->r_locked_dir = parent->d_inode; + req->r_inode = d_inode(child); + ihold(d_inode(child)); + req->r_ino2 = ceph_vino(d_inode(parent)); + req->r_locked_dir = d_inode(parent); req->r_num_caps = 2; err = ceph_mdsc_do_request(mdsc, NULL, req); - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); if (!err) { struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; memcpy(name, rinfo->dname, rinfo->dname_len); name[rinfo->dname_len] = 0; dout("get_name %p ino %llx.%llx name %s\n", - child, ceph_vinop(child->d_inode), name); + child, ceph_vinop(d_inode(child)), name); } else { dout("get_name %p ino %llx.%llx err %d\n", - child, ceph_vinop(child->d_inode), err); + child, ceph_vinop(d_inode(child)), err); } ceph_mdsc_put_request(req); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index d533075..3b6b522 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -7,7 +7,6 @@ #include <linux/mount.h> #include <linux/namei.h> #include <linux/writeback.h> -#include <linux/aio.h> #include <linux/falloc.h> #include "super.h" @@ -292,14 +291,14 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, } if (err) goto out_req; - if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) { + if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { /* make vfs retry on splice, ENOENT, or symlink */ dout("atomic_open finish_no_open on dn %p\n", dn); err = finish_no_open(file, dn); } else { dout("atomic_open finish_open on dn %p\n", dn); if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { - ceph_init_inode_acls(dentry->d_inode, &acls); + ceph_init_inode_acls(d_inode(dentry), &acls); *opened |= FILE_CREATED; } err = finish_open(file, dentry, ceph_open, opened); @@ -458,7 +457,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, if (ret < 0) return ret; - if (file->f_flags & O_DIRECT) { + if (iocb->ki_flags & IOCB_DIRECT) { while (iov_iter_count(i)) { size_t start; ssize_t n; @@ -808,7 +807,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *filp = iocb->ki_filp; struct ceph_file_info *fi = filp->private_data; - size_t len = iocb->ki_nbytes; + size_t len = iov_iter_count(to); struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); struct page *pinned_page = NULL; @@ -829,7 +828,7 @@ again: return ret; if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || - (iocb->ki_filp->f_flags & O_DIRECT) || + 
(iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", @@ -942,9 +941,9 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->client->osdc; - ssize_t count = iov_iter_count(from), written = 0; + ssize_t count, written = 0; int err, want, got; - loff_t pos = iocb->ki_pos; + loff_t pos; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; @@ -954,14 +953,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) /* We can write back this queue in page reclaim */ current->backing_dev_info = inode_to_bdi(inode); - err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); - if (err) - goto out; - - if (count == 0) + err = generic_write_checks(iocb, from); + if (err <= 0) goto out; - iov_iter_truncate(from, count); + pos = iocb->ki_pos; + count = iov_iter_count(from); err = file_remove_suid(file); if (err) goto out; @@ -998,12 +995,12 @@ retry_snap: inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || - (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) { + (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { struct iov_iter data; mutex_unlock(&inode->i_mutex); /* we might need to revert back to that point */ data = *from; - if (file->f_flags & O_DIRECT) + if (iocb->ki_flags & IOCB_DIRECT) written = ceph_sync_direct_write(iocb, &data, pos); else written = ceph_sync_write(iocb, &data, pos); @@ -1332,8 +1329,6 @@ const struct file_operations ceph_file_fops = { .open = ceph_open, .release = ceph_release, .llseek = ceph_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = ceph_read_iter, .write_iter = ceph_write_iter, .mmap = ceph_mmap, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 119c43c..e876e19 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -940,7 +940,7 @@ static void update_dentry_lease(struct dentry *dentry, dentry, duration, ttl); /* make lease_rdcache_gen match directory */ - dir = dentry->d_parent->d_inode; + dir = d_inode(dentry->d_parent); di->lease_shared_gen = ceph_inode(dir)->i_shared_gen; if (duration == 0) @@ -980,7 +980,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, { struct dentry *realdn; - BUG_ON(dn->d_inode); + BUG_ON(d_inode(dn)); /* dn must be unhashed */ if (!d_unhashed(dn)) @@ -998,13 +998,13 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, "inode %p ino %llx.%llx\n", dn, d_count(dn), realdn, d_count(realdn), - realdn->d_inode, ceph_vinop(realdn->d_inode)); + d_inode(realdn), ceph_vinop(d_inode(realdn))); dput(dn); dn = realdn; } else { BUG_ON(!ceph_dentry(dn)); dout("dn %p attached to %p ino %llx.%llx\n", - dn, dn->d_inode, ceph_vinop(dn->d_inode)); + dn, d_inode(dn), ceph_vinop(d_inode(dn))); } if ((!prehash || *prehash) && d_unhashed(dn)) d_rehash(dn); @@ -1125,11 +1125,11 @@ retry_lookup: dput(parent); goto done; } - } else if (dn->d_inode && - (ceph_ino(dn->d_inode) != vino.ino || - ceph_snap(dn->d_inode) != vino.snap)) { + } else if (d_really_is_positive(dn) && + (ceph_ino(d_inode(dn)) != vino.ino || + ceph_snap(d_inode(dn)) != vino.snap)) { dout(" dn %p points to wrong inode %p\n", - dn, dn->d_inode); + dn, d_inode(dn)); d_delete(dn); dput(dn); goto retry_lookup; @@ -1183,7 +1183,7 @@ retry_lookup: BUG_ON(!dn); BUG_ON(!dir); - 
BUG_ON(dn->d_parent->d_inode != dir); + BUG_ON(d_inode(dn->d_parent) != dir); BUG_ON(ceph_ino(dir) != le64_to_cpu(rinfo->diri.in->ino)); BUG_ON(ceph_snap(dir) != @@ -1235,7 +1235,7 @@ retry_lookup: /* null dentry? */ if (!rinfo->head->is_target) { dout("fill_trace null dentry\n"); - if (dn->d_inode) { + if (d_really_is_positive(dn)) { ceph_dir_clear_ordered(dir); dout("d_delete %p\n", dn); d_delete(dn); @@ -1252,7 +1252,7 @@ retry_lookup: } /* attach proper inode */ - if (!dn->d_inode) { + if (d_really_is_negative(dn)) { ceph_dir_clear_ordered(dir); ihold(in); dn = splice_dentry(dn, in, &have_lease); @@ -1261,9 +1261,9 @@ retry_lookup: goto done; } req->r_dentry = dn; /* may have spliced */ - } else if (dn->d_inode && dn->d_inode != in) { + } else if (d_really_is_positive(dn) && d_inode(dn) != in) { dout(" %p links to %p %llx.%llx, not %llx.%llx\n", - dn, dn->d_inode, ceph_vinop(dn->d_inode), + dn, d_inode(dn), ceph_vinop(d_inode(dn)), ceph_vinop(in)); have_lease = false; } @@ -1363,7 +1363,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, return readdir_prepopulate_inodes_only(req, session); if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { - snapdir = ceph_get_snapdir(parent->d_inode); + snapdir = ceph_get_snapdir(d_inode(parent)); parent = d_find_alias(snapdir); dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", rinfo->dir_nr, parent); @@ -1371,7 +1371,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, dout("readdir_prepopulate %d items under dn %p\n", rinfo->dir_nr, parent); if (rinfo->dir_dir) - ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); + ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); } /* FIXME: release caps/leases if error occurs */ @@ -1405,11 +1405,11 @@ retry_lookup: err = ret; goto out; } - } else if (dn->d_inode && - (ceph_ino(dn->d_inode) != vino.ino || - ceph_snap(dn->d_inode) != vino.snap)) { + } else if (d_really_is_positive(dn) && + (ceph_ino(d_inode(dn)) != vino.ino || + ceph_snap(d_inode(dn)) != vino.snap)) { dout(" dn %p points to wrong inode %p\n", - dn, dn->d_inode); + dn, d_inode(dn)); d_delete(dn); dput(dn); goto retry_lookup; @@ -1423,8 +1423,8 @@ retry_lookup: } /* inode */ - if (dn->d_inode) { - in = dn->d_inode; + if (d_really_is_positive(dn)) { + in = d_inode(dn); } else { in = ceph_get_inode(parent->d_sb, vino); if (IS_ERR(in)) { @@ -1440,13 +1440,13 @@ retry_lookup: req->r_request_started, -1, &req->r_caps_reservation) < 0) { pr_err("fill_inode badness on %p\n", in); - if (!dn->d_inode) + if (d_really_is_negative(dn)) iput(in); d_drop(dn); goto next_item; } - if (!dn->d_inode) { + if (d_really_is_negative(dn)) { struct dentry *realdn = splice_dentry(dn, in, NULL); if (IS_ERR(realdn)) { err = PTR_ERR(realdn); @@ -1693,7 +1693,7 @@ retry: */ static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct ceph_inode_info *ci = ceph_inode(dentry->d_inode); + struct ceph_inode_info *ci = ceph_inode(d_inode(dentry)); nd_set_link(nd, ci->i_symlink); return NULL; } @@ -1714,7 +1714,7 @@ static const struct inode_operations ceph_symlink_iops = { */ int ceph_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_inode_info *ci = ceph_inode(inode); const unsigned int ia_valid = attr->ia_valid; struct ceph_mds_request *req; @@ -1990,7 +1990,7 @@ int ceph_permission(struct inode *inode, int mask) int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode 
*inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_inode_info *ci = ceph_inode(inode); int err; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 71c073f..84f37f3 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -679,7 +679,7 @@ static struct dentry *get_nonsnap_parent(struct dentry *dentry) * except to resplice to another snapdir, and either the old or new * result is a valid result. */ - while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP) + while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP) dentry = dentry->d_parent; return dentry; } @@ -716,20 +716,20 @@ static int __choose_mds(struct ceph_mds_client *mdsc, } else if (req->r_dentry) { /* ignore race with rename; old or new d_parent is okay */ struct dentry *parent = req->r_dentry->d_parent; - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); if (dir->i_sb != mdsc->fsc->sb) { /* not this fs! */ - inode = req->r_dentry->d_inode; + inode = d_inode(req->r_dentry); } else if (ceph_snap(dir) != CEPH_NOSNAP) { /* direct snapped/virtual snapdir requests * based on parent dir inode */ struct dentry *dn = get_nonsnap_parent(parent); - inode = dn->d_inode; + inode = d_inode(dn); dout("__choose_mds using nonsnap parent %p\n", inode); } else { /* dentry target */ - inode = req->r_dentry->d_inode; + inode = d_inode(req->r_dentry); if (!inode || mode == USE_AUTH_MDS) { /* dir + name */ inode = dir; @@ -1021,6 +1021,33 @@ static void cleanup_cap_releases(struct ceph_mds_session *session) spin_unlock(&session->s_cap_lock); } +static void cleanup_session_requests(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_mds_request *req; + struct rb_node *p; + + dout("cleanup_session_requests mds%d\n", session->s_mds); + mutex_lock(&mdsc->mutex); + while (!list_empty(&session->s_unsafe)) { + req = list_first_entry(&session->s_unsafe, + struct ceph_mds_request, r_unsafe_item); + list_del_init(&req->r_unsafe_item); + pr_info(" dropping unsafe request %llu\n", req->r_tid); + __unregister_request(mdsc, req); + } + /* zero r_attempts, so kick_requests() will re-send requests */ + p = rb_first(&mdsc->request_tree); + while (p) { + req = rb_entry(p, struct ceph_mds_request, r_node); + p = rb_next(p); + if (req->r_session && + req->r_session->s_mds == session->s_mds) + req->r_attempts = 0; + } + mutex_unlock(&mdsc->mutex); +} + /* * Helper to safely iterate over all caps associated with a session, with * special care taken to handle a racing __ceph_remove_cap(). 
@@ -1098,7 +1125,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, cap, ci, &ci->vfs_inode); spin_lock(&ci->i_ceph_lock); __ceph_remove_cap(cap, false); - if (!__ceph_is_any_real_caps(ci)) { + if (!ci->i_auth_cap) { struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; @@ -1120,13 +1147,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, mdsc->num_cap_flushing--; drop = 1; } - if (drop && ci->i_wrbuffer_ref) { - pr_info(" dropping dirty data for %p %lld\n", - inode, ceph_ino(inode)); - ci->i_wrbuffer_ref = 0; - ci->i_wrbuffer_ref_head = 0; - drop++; - } spin_unlock(&mdsc->cap_dirty_lock); } spin_unlock(&ci->i_ceph_lock); @@ -1712,7 +1732,7 @@ retry: seq = read_seqbegin(&rename_lock); rcu_read_lock(); for (temp = dentry; !IS_ROOT(temp);) { - struct inode *inode = temp->d_inode; + struct inode *inode = d_inode(temp); if (inode && ceph_snap(inode) == CEPH_SNAPDIR) len++; /* slash only */ else if (stop_on_nosnap && inode && @@ -1736,7 +1756,7 @@ retry: struct inode *inode; spin_lock(&temp->d_lock); - inode = temp->d_inode; + inode = d_inode(temp); if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { dout("build_path path+%d: %p SNAPDIR\n", pos, temp); @@ -1770,7 +1790,7 @@ retry: goto retry; } - *base = ceph_ino(temp->d_inode); + *base = ceph_ino(d_inode(temp)); *plen = len; dout("build_path on %p %d built %llx '%.*s'\n", dentry, d_count(dentry), *base, len, path); @@ -1783,8 +1803,8 @@ static int build_dentry_path(struct dentry *dentry, { char *path; - if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) { - *pino = ceph_ino(dentry->d_parent->d_inode); + if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) { + *pino = ceph_ino(d_inode(dentry->d_parent)); *ppath = dentry->d_name.name; *ppathlen = dentry->d_name.len; return 0; @@ -1853,7 +1873,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, */ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, struct ceph_mds_request *req, - int mds) + int mds, bool drop_cap_releases) { struct ceph_msg *msg; struct ceph_mds_request_head *head; @@ -1925,7 +1945,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, releases = 0; if (req->r_inode_drop) releases += ceph_encode_inode_release(&p, - req->r_inode ? req->r_inode : req->r_dentry->d_inode, + req->r_inode ? 
req->r_inode : d_inode(req->r_dentry), mds, req->r_inode_drop, req->r_inode_unless, 0); if (req->r_dentry_drop) releases += ceph_encode_dentry_release(&p, req->r_dentry, @@ -1935,8 +1955,14 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, mds, req->r_old_dentry_drop, req->r_old_dentry_unless); if (req->r_old_inode_drop) releases += ceph_encode_inode_release(&p, - req->r_old_dentry->d_inode, + d_inode(req->r_old_dentry), mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); + + if (drop_cap_releases) { + releases = 0; + p = msg->front.iov_base + req->r_request_release_offset; + } + head->num_releases = cpu_to_le16(releases); /* time stamp */ @@ -1989,7 +2015,7 @@ static void complete_request(struct ceph_mds_client *mdsc, */ static int __prepare_send_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req, - int mds) + int mds, bool drop_cap_releases) { struct ceph_mds_request_head *rhead; struct ceph_msg *msg; @@ -2048,7 +2074,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, ceph_msg_put(req->r_request); req->r_request = NULL; } - msg = create_request_message(mdsc, req, mds); + msg = create_request_message(mdsc, req, mds, drop_cap_releases); if (IS_ERR(msg)) { req->r_err = PTR_ERR(msg); complete_request(mdsc, req); @@ -2132,7 +2158,7 @@ static int __do_request(struct ceph_mds_client *mdsc, if (req->r_request_started == 0) /* note request start time */ req->r_request_started = jiffies; - err = __prepare_send_request(mdsc, req, mds); + err = __prepare_send_request(mdsc, req, mds, false); if (!err) { ceph_msg_get(req->r_request); ceph_con_send(&session->s_con, req->r_request); @@ -2590,6 +2616,7 @@ static void handle_session(struct ceph_mds_session *session, case CEPH_SESSION_CLOSE: if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) pr_info("mds%d reconnect denied\n", session->s_mds); + cleanup_session_requests(mdsc, session); remove_session_caps(session); wake = 2; /* for good measure */ wake_up_all(&mdsc->session_close_wq); @@ -2658,7 +2685,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, mutex_lock(&mdsc->mutex); list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { - err = __prepare_send_request(mdsc, req, session->s_mds); + err = __prepare_send_request(mdsc, req, session->s_mds, true); if (!err) { ceph_msg_get(req->r_request); ceph_con_send(&session->s_con, req->r_request); @@ -2679,7 +2706,8 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, continue; /* only old requests */ if (req->r_session && req->r_session->s_mds == session->s_mds) { - err = __prepare_send_request(mdsc, req, session->s_mds); + err = __prepare_send_request(mdsc, req, + session->s_mds, true); if (!err) { ceph_msg_get(req->r_request); ceph_con_send(&session->s_con, req->r_request); @@ -2864,7 +2892,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, spin_unlock(&session->s_cap_lock); /* trim unused caps to reduce MDS's cache rejoin time */ - shrink_dcache_parent(mdsc->fsc->sb->s_root); + if (mdsc->fsc->sb->s_root) + shrink_dcache_parent(mdsc->fsc->sb->s_root); ceph_con_close(&session->s_con); ceph_con_open(&session->s_con, @@ -3133,7 +3162,7 @@ static void handle_lease(struct ceph_mds_client *mdsc, di->lease_renew_from && di->lease_renew_after == 0) { unsigned long duration = - le32_to_cpu(h->duration_ms) * HZ / 1000; + msecs_to_jiffies(le32_to_cpu(h->duration_ms)); di->lease_seq = seq; dentry->d_time = di->lease_renew_from + duration; diff --git a/fs/ceph/strings.c 
b/fs/ceph/strings.c index 51cc23e..89e6bc3 100644 --- a/fs/ceph/strings.c +++ b/fs/ceph/strings.c @@ -75,6 +75,7 @@ const char *ceph_mds_op_name(int op) case CEPH_MDS_OP_LSSNAP: return "lssnap"; case CEPH_MDS_OP_MKSNAP: return "mksnap"; case CEPH_MDS_OP_RMSNAP: return "rmsnap"; + case CEPH_MDS_OP_RENAMESNAP: return "renamesnap"; case CEPH_MDS_OP_SETFILELOCK: return "setfilelock"; case CEPH_MDS_OP_GETFILELOCK: return "getfilelock"; } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index a63997b..4e99053 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -44,7 +44,7 @@ static void ceph_put_super(struct super_block *s) static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) { - struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode); + struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry)); struct ceph_monmap *monmap = fsc->client->monc.monmap; struct ceph_statfs st; u64 fsid; @@ -345,6 +345,11 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt, fsopt->rsize = CEPH_RSIZE_DEFAULT; fsopt->rasize = CEPH_RASIZE_DEFAULT; fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); + if (!fsopt->snapdir_name) { + err = -ENOMEM; + goto out; + } + fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT; @@ -406,31 +411,20 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) { struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb); struct ceph_mount_options *fsopt = fsc->mount_options; - struct ceph_options *opt = fsc->client->options; - - if (opt->flags & CEPH_OPT_FSID) - seq_printf(m, ",fsid=%pU", &opt->fsid); - if (opt->flags & CEPH_OPT_NOSHARE) - seq_puts(m, ",noshare"); - if (opt->flags & CEPH_OPT_NOCRC) - seq_puts(m, ",nocrc"); - if (opt->flags & CEPH_OPT_NOMSGAUTH) - seq_puts(m, ",nocephx_require_signatures"); - if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) - seq_puts(m, ",notcp_nodelay"); - - if (opt->name) - seq_printf(m, ",name=%s", opt->name); - if (opt->key) - seq_puts(m, ",secret=<hidden>"); - - if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) - seq_printf(m, ",mount_timeout=%d", opt->mount_timeout); - if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) - seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl); - if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) - seq_printf(m, ",osdkeepalivetimeout=%d", - opt->osd_keepalive_timeout); + size_t pos; + int ret; + + /* a comma between MNT/MS and client options */ + seq_putc(m, ','); + pos = m->count; + + ret = ceph_print_client_options(m, fsc->client); + if (ret) + return ret; + + /* retract our comma if no client options */ + if (m->count == pos) + m->count--; if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT) seq_puts(m, ",dirstat"); @@ -438,14 +432,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_puts(m, ",norbytes"); if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR) seq_puts(m, ",noasyncreaddir"); - if (fsopt->flags & CEPH_MOUNT_OPT_DCACHE) - seq_puts(m, ",dcache"); - else + if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0) seq_puts(m, ",nodcache"); if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) seq_puts(m, ",fsc"); - else - seq_puts(m, ",nofsc"); #ifdef CONFIG_CEPH_FS_POSIX_ACL if (fsopt->sb_flags & MS_POSIXACL) @@ -477,6 +467,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes); if 
(strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT)) seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name); + return 0; } @@ -730,6 +721,11 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, if (IS_ERR(req)) return ERR_CAST(req); req->r_path1 = kstrdup(path, GFP_NOFS); + if (!req->r_path1) { + root = ERR_PTR(-ENOMEM); + goto out; + } + req->r_ino1.ino = CEPH_INO_ROOT; req->r_ino1.snap = CEPH_NOSNAP; req->r_started = started; @@ -976,7 +972,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, if (IS_ERR(res)) goto out_splat; dout("root %p inode %p ino %llx.%llx\n", res, - res->d_inode, ceph_vinop(res->d_inode)); + d_inode(res), ceph_vinop(d_inode(res))); return res; out_splat: diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 04c8124..fa20e13 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -36,7 +36,8 @@ #define CEPH_MOUNT_OPT_DCACHE (1<<9) /* use dcache for readdir etc */ #define CEPH_MOUNT_OPT_FSCACHE (1<<10) /* use fscache */ -#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES) +#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES | \ + CEPH_MOUNT_OPT_DCACHE) #define ceph_set_mount_opt(fsc, opt) \ (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; @@ -881,7 +882,6 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma); /* file.c */ extern const struct file_operations ceph_file_fops; -extern const struct address_space_operations ceph_aops; extern int ceph_open(struct inode *inode, struct file *file); extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry, diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 5a492ca..cd7ffad4 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -776,12 +776,12 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value, if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_getxattr(dentry, name, value, size); - return __ceph_getxattr(dentry->d_inode, name, value, size); + return __ceph_getxattr(d_inode(dentry), name, value, size); } ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode); u32 vir_namelen = 0; @@ -847,7 +847,7 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name, const char *value, size_t size, int flags) { struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_request *req; struct ceph_mds_client *mdsc = fsc->mdsc; @@ -877,16 +877,23 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name, err = PTR_ERR(req); goto out; } - req->r_inode = inode; - ihold(inode); - req->r_inode_drop = CEPH_CAP_XATTR_SHARED; - req->r_num_caps = 1; + req->r_args.setxattr.flags = cpu_to_le32(flags); req->r_path2 = kstrdup(name, GFP_NOFS); + if (!req->r_path2) { + ceph_mdsc_put_request(req); + err = -ENOMEM; + goto out; + } req->r_pagelist = pagelist; pagelist = NULL; + req->r_inode = inode; + ihold(inode); + req->r_num_caps = 1; + req->r_inode_drop = CEPH_CAP_XATTR_SHARED; + dout("xattr.ver (before): %lld\n", ci->i_xattrs.version); err = ceph_mdsc_do_request(mdsc, NULL, req); ceph_mdsc_put_request(req); @@ -901,7 +908,7 @@ out: int __ceph_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { - struct inode 
*inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_vxattr *vxattr; struct ceph_inode_info *ci = ceph_inode(inode); int issued; @@ -995,7 +1002,7 @@ out: int ceph_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { - if (ceph_snap(dentry->d_inode) != CEPH_NOSNAP) + if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP) return -EROFS; if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) @@ -1011,7 +1018,7 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name) { struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); struct ceph_mds_client *mdsc = fsc->mdsc; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_mds_request *req; int err; @@ -1019,12 +1026,14 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name) USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); + req->r_path2 = kstrdup(name, GFP_NOFS); + if (!req->r_path2) + return -ENOMEM; + req->r_inode = inode; ihold(inode); - req->r_inode_drop = CEPH_CAP_XATTR_SHARED; req->r_num_caps = 1; - req->r_path2 = kstrdup(name, GFP_NOFS); - + req->r_inode_drop = CEPH_CAP_XATTR_SHARED; err = ceph_mdsc_do_request(mdsc, NULL, req); ceph_mdsc_put_request(req); return err; @@ -1032,7 +1041,7 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name) int __ceph_removexattr(struct dentry *dentry, const char *name) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ceph_vxattr *vxattr; struct ceph_inode_info *ci = ceph_inode(inode); int issued; @@ -1098,7 +1107,7 @@ out: int ceph_removexattr(struct dentry *dentry, const char *name) { - if (ceph_snap(dentry->d_inode) != CEPH_NOSNAP) + if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP) return -EROFS; if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index b8602f1..430e034 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -301,7 +301,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) if (full_path == NULL) goto cdda_exit; - cifs_sb = CIFS_SB(mntpt->d_inode->i_sb); + cifs_sb = CIFS_SB(d_inode(mntpt)->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { mnt = ERR_CAST(tlink); diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 4ac7445..aa0dc25 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -1,6 +1,9 @@ /* * fs/cifs/cifsencrypt.c * + * Encryption and hashing operations relating to NTLM, NTLMv2. 
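Most hunks in this series are mechanical conversions from direct dentry->d_inode dereferences to the d_inode(), d_really_is_positive() and d_really_is_negative() accessors. A rough sketch of the pattern being adopted; the function below is a made-up example, not code from the patch:

    #include <linux/dcache.h>
    #include <linux/fs.h>

    /* Sketch: use the dcache accessors instead of poking at ->d_inode. */
    static int example_require_dir(struct dentry *dentry)
    {
            struct inode *inode;

            if (d_really_is_negative(dentry))       /* no inode attached */
                    return -ENOENT;

            inode = d_inode(dentry);                /* equivalent to dentry->d_inode */
            return S_ISDIR(inode->i_mode) ? 0 : -ENOTDIR;
    }
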
See MS-NLMP + * for more detailed information + * * Copyright (C) International Business Machines Corp., 2005,2013 * Author(s): Steve French (sfrench@us.ibm.com) * @@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, __func__); return rc; } - } else if (ses->serverName) { + } else { + /* We use ses->serverName if no domain name available */ len = strlen(ses->serverName); server = kmalloc(2 + (len * 2), GFP_KERNEL); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index d72fe37..f5089bd 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -607,7 +607,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) p = s = full_path; do { - struct inode *dir = dentry->d_inode; + struct inode *dir = d_inode(dentry); struct dentry *child; if (!dir) { @@ -906,8 +906,6 @@ const struct inode_operations cifs_symlink_inode_ops = { }; const struct file_operations cifs_file_ops = { - .read = new_sync_read, - .write = new_sync_write, .read_iter = cifs_loose_read_iter, .write_iter = cifs_file_write_iter, .open = cifs_open, @@ -926,8 +924,6 @@ const struct file_operations cifs_file_ops = { }; const struct file_operations cifs_file_strict_ops = { - .read = new_sync_read, - .write = new_sync_write, .read_iter = cifs_strict_readv, .write_iter = cifs_strict_writev, .open = cifs_open, @@ -947,8 +943,6 @@ const struct file_operations cifs_file_strict_ops = { const struct file_operations cifs_file_direct_ops = { /* BB reevaluate whether they can be done with directio, no cache */ - .read = new_sync_read, - .write = new_sync_write, .read_iter = cifs_user_readv, .write_iter = cifs_user_writev, .open = cifs_open, @@ -967,8 +961,6 @@ const struct file_operations cifs_file_direct_ops = { }; const struct file_operations cifs_file_nobrl_ops = { - .read = new_sync_read, - .write = new_sync_write, .read_iter = cifs_loose_read_iter, .write_iter = cifs_file_write_iter, .open = cifs_open, @@ -986,8 +978,6 @@ const struct file_operations cifs_file_nobrl_ops = { }; const struct file_operations cifs_file_strict_nobrl_ops = { - .read = new_sync_read, - .write = new_sync_write, .read_iter = cifs_strict_readv, .write_iter = cifs_strict_writev, .open = cifs_open, @@ -1006,8 +996,6 @@ const struct file_operations cifs_file_strict_nobrl_ops = { const struct file_operations cifs_file_direct_nobrl_ops = { /* BB reevaluate whether they can be done with directio, no cache */ - .read = new_sync_read, - .write = new_sync_write, .read_iter = cifs_user_readv, .write_iter = cifs_user_writev, .open = cifs_open, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index fa13d5e..84650a5 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1898,7 +1898,7 @@ static void cifs_writev_requeue(struct cifs_writedata *wdata) { int i, rc = 0; - struct inode *inode = wdata->cfile->dentry->d_inode; + struct inode *inode = d_inode(wdata->cfile->dentry); struct TCP_Server_Info *server; unsigned int rest_len; @@ -1981,7 +1981,7 @@ cifs_writev_complete(struct work_struct *work) { struct cifs_writedata *wdata = container_of(work, struct cifs_writedata, work); - struct inode *inode = wdata->cfile->dentry->d_inode; + struct inode *inode = d_inode(wdata->cfile->dentry); int i = 0; if (wdata->result == 0) { diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d3aa999..f3bfe08 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -773,8 +773,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) length = atomic_dec_return(&tcpSesAllocCount); if (length > 0) - mempool_resize(cifs_req_poolp, length 
+ cifs_min_rcv, - GFP_KERNEL); + mempool_resize(cifs_req_poolp, length + cifs_min_rcv); } static int @@ -848,8 +847,7 @@ cifs_demultiplex_thread(void *p) length = atomic_inc_return(&tcpSesAllocCount); if (length > 1) - mempool_resize(cifs_req_poolp, length + cifs_min_rcv, - GFP_KERNEL); + mempool_resize(cifs_req_poolp, length + cifs_min_rcv); set_freezable(); while (server->tcpStatus != CifsExiting) { @@ -1599,6 +1597,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, pr_warn("CIFS: username too long\n"); goto cifs_parse_mount_err; } + + kfree(vol->username); vol->username = kstrdup(string, GFP_KERNEL); if (!vol->username) goto cifs_parse_mount_err; @@ -1700,6 +1700,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, goto cifs_parse_mount_err; } + kfree(vol->domainname); vol->domainname = kstrdup(string, GFP_KERNEL); if (!vol->domainname) { pr_warn("CIFS: no memory for domainname\n"); @@ -1731,6 +1732,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, } if (strncasecmp(string, "default", 7) != 0) { + kfree(vol->iocharset); vol->iocharset = kstrdup(string, GFP_KERNEL); if (!vol->iocharset) { @@ -2913,8 +2915,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) * calling name ends in null (byte 16) from old smb * convention. */ - if (server->workstation_RFC1001_name && - server->workstation_RFC1001_name[0] != 0) + if (server->workstation_RFC1001_name[0] != 0) rfc1002mangle(ses_init_buf->trailer. session_req.calling_name, server->workstation_RFC1001_name, @@ -3692,6 +3693,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, #endif /* CIFS_WEAK_PW_HASH */ rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr, nls_codepage); + if (rc) { + cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n", + __func__, rc); + cifs_buf_release(smb_buffer); + return rc; + } bcc_ptr += CIFS_AUTH_RESP_SIZE; if (ses->capabilities & CAP_UNICODE) { diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index b72bc29..338d569 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -745,13 +745,13 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, goto lookup_out; } - if (direntry->d_inode != NULL) { + if (d_really_is_positive(direntry)) { cifs_dbg(FYI, "non-NULL inode in lookup\n"); } else { cifs_dbg(FYI, "NULL inode in lookup\n"); } cifs_dbg(FYI, "Full path: %s inode = 0x%p\n", - full_path, direntry->d_inode); + full_path, d_inode(direntry)); if (pTcon->unix_ext) { rc = cifs_get_inode_info_unix(&newInode, full_path, @@ -792,7 +792,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - if (direntry->d_inode) { + if (d_really_is_positive(direntry)) { if (cifs_revalidate_dentry(direntry)) return 0; else { @@ -803,7 +803,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) * attributes will have been updated by * cifs_revalidate_dentry(). 
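The fs/cifs/connect.c hunks above add kfree() calls in front of the kstrdup() of username, domainname and iocharset, so that a mount option supplied more than once does not leak the earlier copy. A hedged sketch of the pattern; set_string_opt() is an invented helper, not from the patch:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch: replace a previously parsed string option without leaking it. */
    static int set_string_opt(char **slot, const char *value)
    {
            kfree(*slot);                   /* NULL-safe; frees an earlier occurrence */
            *slot = kstrdup(value, GFP_KERNEL);
            return *slot ? 0 : -ENOMEM;
    }
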
*/ - if (IS_AUTOMOUNT(direntry->d_inode) && + if (IS_AUTOMOUNT(d_inode(direntry)) && !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) { spin_lock(&direntry->d_lock); direntry->d_flags |= DCACHE_NEED_AUTOMOUNT; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index a94b3e6..cafbf10 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -273,7 +273,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, __u32 oplock) { struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifsFileInfo *cfile; struct cifs_fid_locks *fdlocks; @@ -357,7 +357,7 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file) */ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) { - struct inode *inode = cifs_file->dentry->d_inode; + struct inode *inode = d_inode(cifs_file->dentry); struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct cifsInodeInfo *cifsi = CIFS_I(inode); @@ -386,7 +386,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) if (list_empty(&cifsi->openFileList)) { cifs_dbg(FYI, "closing last open instance for inode %p\n", - cifs_file->dentry->d_inode); + d_inode(cifs_file->dentry)); /* * In strict cache mode we need invalidate mapping on the last * close because it may cause a error when we open this file @@ -572,7 +572,7 @@ static int cifs_relock_file(struct cifsFileInfo *cfile) { struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; @@ -620,7 +620,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) return rc; } - inode = cfile->dentry->d_inode; + inode = d_inode(cfile->dentry); cifs_sb = CIFS_SB(inode->i_sb); tcon = tlink_tcon(cfile->tlink); server = tcon->ses->server; @@ -874,7 +874,7 @@ cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length, { bool rc = false; struct cifs_fid_locks *cur; - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); list_for_each_entry(cur, &cinode->llist, llist) { rc = cifs_find_fid_lock_conflict(cur, offset, length, type, @@ -899,7 +899,7 @@ cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length, { int rc = 0; struct cifsLockInfo *conf_lock; - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; bool exist; @@ -927,7 +927,7 @@ cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length, static void cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) { - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); down_write(&cinode->lock_sem); list_add_tail(&lock->llist, &cfile->llist->locks); up_write(&cinode->lock_sem); @@ -944,7 +944,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, bool wait) { struct cifsLockInfo *conf_lock; - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); bool exist; int rc = 0; @@ -1125,7 +1125,7 @@ struct lock_to_push { static int cifs_push_posix_locks(struct cifsFileInfo 
*cfile) { - struct inode *inode = cfile->dentry->d_inode; + struct inode *inode = d_inode(cfile->dentry); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct file_lock *flock; struct file_lock_context *flctx = inode->i_flctx; @@ -1214,7 +1214,7 @@ static int cifs_push_locks(struct cifsFileInfo *cfile) { struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; @@ -1382,7 +1382,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, unsigned int max_num, num, max_buf; LOCKING_ANDX_RANGE *buf, *cur; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifsLockInfo *li, *tmp; __u64 length = 1 + flock->fl_end - flock->fl_start; struct list_head tmp_llist; @@ -1488,7 +1488,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; - struct inode *inode = cfile->dentry->d_inode; + struct inode *inode = d_inode(cfile->dentry); if (posix_lck) { int posix_lock_type; @@ -1643,7 +1643,7 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, struct TCP_Server_Info *server; unsigned int xid; struct dentry *dentry = open_file->dentry; - struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode); + struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry)); struct cifs_io_parms io_parms; cifs_sb = CIFS_SB(dentry->d_sb); @@ -1676,7 +1676,7 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, break; } - len = min(server->ops->wp_retry_size(dentry->d_inode), + len = min(server->ops->wp_retry_size(d_inode(dentry)), (unsigned int)write_size - total_written); /* iov[0] is reserved for smb header */ iov[1].iov_base = (char *)write_data + total_written; @@ -1696,9 +1696,9 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, return rc; } } else { - spin_lock(&dentry->d_inode->i_lock); + spin_lock(&d_inode(dentry)->i_lock); cifs_update_eof(cifsi, *offset, bytes_written); - spin_unlock(&dentry->d_inode->i_lock); + spin_unlock(&d_inode(dentry)->i_lock); *offset += bytes_written; } } @@ -1706,12 +1706,12 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, cifs_stats_bytes_written(tcon, total_written); if (total_written > 0) { - spin_lock(&dentry->d_inode->i_lock); - if (*offset > dentry->d_inode->i_size) - i_size_write(dentry->d_inode, *offset); - spin_unlock(&dentry->d_inode->i_lock); + spin_lock(&d_inode(dentry)->i_lock); + if (*offset > d_inode(dentry)->i_size) + i_size_write(d_inode(dentry), *offset); + spin_unlock(&d_inode(dentry)->i_lock); } - mark_inode_dirty_sync(dentry->d_inode); + mark_inode_dirty_sync(d_inode(dentry)); free_xid(xid); return total_written; } @@ -1823,6 +1823,7 @@ refind_writable: cifsFileInfo_put(inv_file); spin_lock(&cifs_file_list_lock); ++refind; + inv_file = NULL; goto refind_writable; } } @@ -2405,7 +2406,7 @@ cifs_uncached_writev_complete(struct work_struct *work) { struct cifs_writedata *wdata = container_of(work, struct cifs_writedata, work); - struct inode *inode = wdata->cfile->dentry->d_inode; + struct inode *inode = 
d_inode(wdata->cfile->dentry); struct cifsInodeInfo *cifsi = CIFS_I(inode); spin_lock(&inode->i_lock); @@ -2559,10 +2560,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, return rc; } -static ssize_t -cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset) +ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) { - size_t len; + struct file *file = iocb->ki_filp; ssize_t total_written = 0; struct cifsFileInfo *open_file; struct cifs_tcon *tcon; @@ -2572,15 +2572,15 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset) struct iov_iter saved_from; int rc; - len = iov_iter_count(from); - rc = generic_write_checks(file, poffset, &len, 0); - if (rc) - return rc; - - if (!len) - return 0; + /* + * BB - optimize the way when signing is disabled. We can drop this + * extra memory-to-memory copying and use iovec buffers for constructing + * write request. + */ - iov_iter_truncate(from, len); + rc = generic_write_checks(iocb, from); + if (rc <= 0) + return rc; INIT_LIST_HEAD(&wdata_list); cifs_sb = CIFS_FILE_SB(file); @@ -2592,8 +2592,8 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset) memcpy(&saved_from, from, sizeof(struct iov_iter)); - rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb, - &wdata_list); + rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from, + open_file, cifs_sb, &wdata_list); /* * If at least one write was successfully sent, then discard any rc @@ -2632,7 +2632,7 @@ restart_loop: memcpy(&tmp_from, &saved_from, sizeof(struct iov_iter)); iov_iter_advance(&tmp_from, - wdata->offset - *poffset); + wdata->offset - iocb->ki_pos); rc = cifs_write_from_iter(wdata->offset, wdata->bytes, &tmp_from, @@ -2649,34 +2649,13 @@ restart_loop: kref_put(&wdata->refcount, cifs_uncached_writedata_release); } - if (total_written > 0) - *poffset += total_written; + if (unlikely(!total_written)) + return rc; + iocb->ki_pos += total_written; + set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags); cifs_stats_bytes_written(tcon, total_written); - return total_written ? total_written : (ssize_t)rc; -} - -ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) -{ - ssize_t written; - struct inode *inode; - loff_t pos = iocb->ki_pos; - - inode = file_inode(iocb->ki_filp); - - /* - * BB - optimize the way when signing is disabled. We can drop this - * extra memory-to-memory copying and use iovec buffers for constructing - * write request. 
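With the conversion above, cifs_user_writev() no longer sizes and truncates the request by hand: generic_write_checks() now takes the kiocb and the iov_iter, returns a negative error or 0 when there is nothing to do, and otherwise leaves the clamped byte count in the iterator. A simplified sketch of the 4.1-era calling convention; the function and the placeholder comment are illustrative:

    #include <linux/fs.h>
    #include <linux/uio.h>

    /* Sketch: the post-conversion shape of an uncached ->write_iter method. */
    static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
    {
            ssize_t count;

            count = generic_write_checks(iocb, from);  /* clamps iter, applies O_APPEND and limits */
            if (count <= 0)
                    return count;                      /* error, or nothing left to write */

            /* ... send iov_iter_count(from) bytes to the server here ... */

            iocb->ki_pos += count;                     /* the method advances the position itself */
            return count;
    }
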
- */ - - written = cifs_iovec_write(iocb->ki_filp, from, &pos); - if (written > 0) { - set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags); - iocb->ki_pos = pos; - } - - return written; + return total_written; } static ssize_t @@ -2687,8 +2666,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file->f_mapping->host; struct cifsInodeInfo *cinode = CIFS_I(inode); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; - ssize_t rc = -EACCES; - loff_t lock_pos = iocb->ki_pos; + ssize_t rc; /* * We need to hold the sem to be sure nobody modifies lock list @@ -2696,23 +2674,24 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from) */ down_read(&cinode->lock_sem); mutex_lock(&inode->i_mutex); - if (file->f_flags & O_APPEND) - lock_pos = i_size_read(inode); - if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from), + + rc = generic_write_checks(iocb, from); + if (rc <= 0) + goto out; + + if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), server->vals->exclusive_lock_type, NULL, - CIFS_WRITE_OP)) { + CIFS_WRITE_OP)) rc = __generic_file_write_iter(iocb, from); - mutex_unlock(&inode->i_mutex); - - if (rc > 0) { - ssize_t err; + else + rc = -EACCES; +out: + mutex_unlock(&inode->i_mutex); - err = generic_write_sync(file, iocb->ki_pos - rc, rc); - if (err < 0) - rc = err; - } - } else { - mutex_unlock(&inode->i_mutex); + if (rc > 0) { + ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc); + if (err < 0) + rc = err; } up_read(&cinode->lock_sem); return rc; @@ -3815,7 +3794,7 @@ void cifs_oplock_break(struct work_struct *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); - struct inode *inode = cfile->dentry->d_inode; + struct inode *inode = d_inode(cfile->dentry); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; @@ -3876,8 +3855,7 @@ void cifs_oplock_break(struct work_struct *work) * Direct IO is not yet supported in the cached mode. */ static ssize_t -cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter, - loff_t pos) +cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) { /* * FIXME diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 2d4f372..55b5811 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, cifs_buf_release(srchinf->ntwrk_buf_start); } kfree(srchinf); + if (rc) + goto cgii_exit; } else goto cgii_exit; @@ -1065,7 +1067,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, int rc; struct cifs_fid fid; struct cifs_open_parms oparms; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cifsInode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; @@ -1194,7 +1196,7 @@ cifs_drop_nlink(struct inode *inode) } /* - * If dentry->d_inode is null (usually meaning the cached dentry + * If d_inode(dentry) is null (usually meaning the cached dentry * is a negative dentry) then we would attempt a standard SMB delete, but * if that fails we can not attempt the fall back mechanisms on EACCESS * but will return the EACCESS to the caller. 
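The cifs_writev() rewrite above runs generic_write_checks() under the inode mutex so the byte-range lock conflict test sees the final iocb->ki_pos (which matters for O_APPEND), and calls generic_write_sync() only after the mutex is dropped. A rough sketch of that ordering with the cifs-specific lock check elided; the wrapper is illustrative:

    #include <linux/fs.h>
    #include <linux/uio.h>

    /* Sketch: check-then-write under i_mutex, sync after unlocking. */
    static ssize_t example_locked_write(struct kiocb *iocb, struct iov_iter *from)
    {
            struct inode *inode = file_inode(iocb->ki_filp);
            ssize_t rc;

            mutex_lock(&inode->i_mutex);
            rc = generic_write_checks(iocb, from);  /* resolves O_APPEND to the real ki_pos */
            if (rc > 0)
                    rc = __generic_file_write_iter(iocb, from);
            mutex_unlock(&inode->i_mutex);

            if (rc > 0) {
                    ssize_t err = generic_write_sync(iocb->ki_filp,
                                                     iocb->ki_pos - rc, rc);
                    if (err < 0)
                            rc = err;
            }
            return rc;
    }
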
Note that the VFS does not call @@ -1205,7 +1207,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) int rc = 0; unsigned int xid; char *full_path = NULL; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cifs_inode; struct super_block *sb = dir->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); @@ -1549,13 +1551,13 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) cifs_put_tlink(tlink); if (!rc) { - spin_lock(&direntry->d_inode->i_lock); - i_size_write(direntry->d_inode, 0); - clear_nlink(direntry->d_inode); - spin_unlock(&direntry->d_inode->i_lock); + spin_lock(&d_inode(direntry)->i_lock); + i_size_write(d_inode(direntry), 0); + clear_nlink(d_inode(direntry)); + spin_unlock(&d_inode(direntry)->i_lock); } - cifsInode = CIFS_I(direntry->d_inode); + cifsInode = CIFS_I(d_inode(direntry)); /* force revalidate to go get info when needed */ cifsInode->time = 0; @@ -1566,7 +1568,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) */ cifsInode->time = 0; - direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime = + d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); rmdir_exit: @@ -1725,7 +1727,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, unlink_target: /* Try unlinking the target dentry if it's not negative */ - if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) { + if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { if (d_is_dir(target_dentry)) tmprc = cifs_rmdir(target_dir, target_dentry); else @@ -1865,7 +1867,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry) { unsigned int xid; int rc = 0; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct super_block *sb = dentry->d_sb; char *full_path = NULL; @@ -1917,7 +1919,7 @@ int cifs_revalidate_file(struct file *filp) int cifs_revalidate_dentry(struct dentry *dentry) { int rc; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); rc = cifs_revalidate_dentry_attr(dentry); if (rc) @@ -1931,7 +1933,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, { struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int rc; /* @@ -2108,7 +2110,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) int rc; unsigned int xid; char *full_path = NULL; - struct inode *inode = direntry->d_inode; + struct inode *inode = d_inode(direntry); struct cifsInodeInfo *cifsInode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; @@ -2249,7 +2251,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) unsigned int xid; kuid_t uid = INVALID_UID; kgid_t gid = INVALID_GID; - struct inode *inode = direntry->d_inode; + struct inode *inode = d_inode(direntry); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsInodeInfo *cifsInode = CIFS_I(inode); char *full_path = NULL; @@ -2407,7 +2409,7 @@ cifs_setattr_exit: int cifs_setattr(struct dentry *direntry, struct iattr *attrs) { - struct inode *inode = direntry->d_inode; + struct inode *inode = d_inode(direntry); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 2ec6037f..252e672 100644 --- a/fs/cifs/link.c +++ 
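The cifs_rmdir() hunk above keeps the client-side inode coherent once the server has removed the directory: size and link count are cleared under i_lock, the attribute cache is marked stale, and the parent's timestamps are refreshed. A condensed sketch of that bookkeeping; the helper name is illustrative and error handling is omitted:

    #include <linux/dcache.h>
    #include <linux/fs.h>

    /* Sketch: local inode updates after a successful remote rmdir. */
    static void example_post_rmdir(struct inode *dir, struct dentry *victim)
    {
            struct inode *inode = d_inode(victim);

            spin_lock(&inode->i_lock);
            i_size_write(inode, 0);         /* the directory no longer exists remotely */
            clear_nlink(inode);
            spin_unlock(&inode->i_lock);

            dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
    }
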
b/fs/cifs/link.c @@ -586,12 +586,12 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, * if source file is cached (oplocked) revalidate will not go to server * until the file is closed or oplock broken so update nlinks locally */ - if (old_file->d_inode) { - cifsInode = CIFS_I(old_file->d_inode); + if (d_really_is_positive(old_file)) { + cifsInode = CIFS_I(d_inode(old_file)); if (rc == 0) { - spin_lock(&old_file->d_inode->i_lock); - inc_nlink(old_file->d_inode); - spin_unlock(&old_file->d_inode->i_lock); + spin_lock(&d_inode(old_file)->i_lock); + inc_nlink(d_inode(old_file)); + spin_unlock(&d_inode(old_file)->i_lock); /* * parent dir timestamps will update from srv within a @@ -629,7 +629,7 @@ cifs_hl_exit: void * cifs_follow_link(struct dentry *direntry, struct nameidata *nd) { - struct inode *inode = direntry->d_inode; + struct inode *inode = d_inode(direntry); int rc = -ENOMEM; unsigned int xid; char *full_path = NULL; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 3379463..8442b8b 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -473,7 +473,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) continue; cifs_dbg(FYI, "file id match, oplock break\n"); - pCifsInode = CIFS_I(netfile->dentry->d_inode); + pCifsInode = CIFS_I(d_inode(netfile->dentry)); set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &pCifsInode->flags); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index c295338..b4a4723 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -78,7 +78,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, { struct dentry *dentry, *alias; struct inode *inode; - struct super_block *sb = parent->d_inode->i_sb; + struct super_block *sb = d_inode(parent)->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); @@ -88,7 +88,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, return; if (dentry) { - inode = dentry->d_inode; + inode = d_inode(dentry); if (inode) { /* * If we're generating inode numbers, then we don't diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index d297903..7bfdd60 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -722,7 +722,7 @@ cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, static void cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) { - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); cfile->fid.netfid = fid->netfid; cifs_set_oplock_level(cinode, oplock); cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode); diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 7198eac..2ab297d 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -95,7 +95,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, unsigned int max_num, num = 0, max_buf; struct smb2_lock_element *buf, *cur; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifsLockInfo *li, *tmp; __u64 length = 1 + flock->fl_end - flock->fl_start; struct list_head tmp_llist; @@ -231,7 +231,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) unsigned int xid; unsigned int max_num, max_buf; struct smb2_lock_element *buf; - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_fid_locks *fdlocks; xid = get_xid(); diff --git 
a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 689f035..1c59070 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) /* return pointer to beginning of data area, ie offset from SMB start */ if ((*off != 0) && (*len != 0)) - return hdr->ProtocolId + *off; + return (char *)(&hdr->ProtocolId[0]) + *off; else return NULL; } @@ -453,7 +453,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, list_for_each(tmp, &tcon->openFileList) { cfile = list_entry(tmp, struct cifsFileInfo, tlist); - cinode = CIFS_I(cfile->dentry->d_inode); + cinode = CIFS_I(d_inode(cfile->dentry)); if (memcmp(cinode->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) @@ -590,7 +590,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) continue; cifs_dbg(FYI, "file id match, oplock break\n"); - cinode = CIFS_I(cfile->dentry->d_inode); + cinode = CIFS_I(d_inode(cfile->dentry)); if (!CIFS_CACHE_WRITE(cinode) && rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 96b5d40..54daee5 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -524,7 +524,7 @@ smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon) static void smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) { - struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); + struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; cfile->fid.persistent_fid = fid->persistent_fid; @@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid, /* No need to change MaxChunks since already set to 1 */ chunk_sizes_updated = true; - } + } else + goto cchunk_out; } cchunk_out: @@ -792,7 +793,7 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon, * If extending file more than one page make sparse. Many Linux fs * make files sparse by default when extending via ftruncate */ - inode = cfile->dentry->d_inode; + inode = d_inode(cfile->dentry); if (!set_alloc && (size > inode->i_size + 8192)) { __u8 set_sparse = 1; @@ -1031,7 +1032,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, xid = get_xid(); - inode = cfile->dentry->d_inode; + inode = d_inode(cfile->dentry); cifsi = CIFS_I(inode); /* if file not oplocked can't be sure whether asking to extend size */ @@ -1082,7 +1083,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, xid = get_xid(); - inode = cfile->dentry->d_inode; + inode = d_inode(cfile->dentry); cifsi = CIFS_I(inode); /* Need to make file sparse, if not already, before freeing range. 
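The smb2_get_data_area_len() hunk above rewrites the offset arithmetic as (char *)(&hdr->ProtocolId[0]) + *off: SMB2 offsets are byte counts from the start of the on-the-wire header, which begins at the ProtocolId field, so the arithmetic is anchored there and done on a char pointer. A small stand-alone sketch with a stand-in struct, not the real smb2_hdr:

    /* Sketch: locate a variable-offset data area inside a received PDU. */
    struct example_hdr {
            unsigned int   buf_length;      /* transport framing, not part of the PDU */
            unsigned char  protocol_id[4];  /* on-the-wire start of the PDU */
            unsigned short struct_size;
            /* ... */
    };

    static char *example_data_area(struct example_hdr *hdr, unsigned int off)
    {
            /*
             * off counts bytes from the start of the PDU itself, so anchor
             * at protocol_id and keep the arithmetic typed as char *.
             */
            return (char *)&hdr->protocol_id[0] + off;
    }
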
*/ @@ -1114,7 +1115,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, xid = get_xid(); - inode = cfile->dentry->d_inode; + inode = d_inode(cfile->dentry); cifsi = CIFS_I(inode); /* if file not oplocked can't be sure whether asking to extend size */ diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 3417340..65cd7a8 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, struct smb2_ioctl_req *req; struct smb2_ioctl_rsp *rsp; struct TCP_Server_Info *server; - struct cifs_ses *ses = tcon->ses; + struct cifs_ses *ses; struct kvec iov[2]; int resp_buftype; int num_iovecs; @@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, if (plen) *plen = 0; + if (tcon) + ses = tcon->ses; + else + return -EIO; + if (ses && (ses->server)) server = ses->server; else @@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; if ((rc != 0) && (rc != -EINVAL)) { - if (tcon) - cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); + cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } else if (rc == -EINVAL) { if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && (opcode != FSCTL_SRV_COPYCHUNK)) { - if (tcon) - cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); + cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } } @@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); - if ((rc != 0) && tcon) + if (rc != 0) cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); free_rsp_buf(resp_buftype, iov[0].iov_base); @@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, struct kvec iov[2]; int rc = 0; int len; - int resp_buftype; + int resp_buftype = CIFS_NO_BUFFER; unsigned char *bufptr; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 72a4d10..ff9e1f8 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -50,9 +50,9 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name) if (direntry == NULL) return -EIO; - if (direntry->d_inode == NULL) + if (d_really_is_negative(direntry)) return -EIO; - sb = direntry->d_inode->i_sb; + sb = d_inode(direntry)->i_sb; if (sb == NULL) return -EIO; @@ -111,9 +111,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, if (direntry == NULL) return -EIO; - if (direntry->d_inode == NULL) + if (d_really_is_negative(direntry)) return -EIO; - sb = direntry->d_inode->i_sb; + sb = d_inode(direntry)->i_sb; if (sb == NULL) return -EIO; @@ -177,12 +177,12 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, memcpy(pacl, ea_value, value_size); if (pTcon->ses->server->ops->set_acl) rc = pTcon->ses->server->ops->set_acl(pacl, - value_size, direntry->d_inode, + value_size, d_inode(direntry), full_path, CIFS_ACL_DACL); else rc = -EOPNOTSUPP; if (rc == 0) /* force revalidate of the inode */ - CIFS_I(direntry->d_inode)->time = 0; + CIFS_I(d_inode(direntry))->time = 0; kfree(pacl); } #else @@ -246,9 +246,9 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, if (direntry == NULL) return -EIO; - if (direntry->d_inode == NULL) + if (d_really_is_negative(direntry)) return -EIO; - sb = direntry->d_inode->i_sb; + sb = d_inode(direntry)->i_sb; if (sb == NULL) return -EIO; @@ -324,7 +324,7 @@ ssize_t 
cifs_getxattr(struct dentry *direntry, const char *ea_name, goto get_ea_exit; /* rc already EOPNOTSUPP */ pacl = pTcon->ses->server->ops->get_acl(cifs_sb, - direntry->d_inode, full_path, &acllen); + d_inode(direntry), full_path, &acllen); if (IS_ERR(pacl)) { rc = PTR_ERR(pacl); cifs_dbg(VFS, "%s: error %zd getting sec desc\n", @@ -382,9 +382,9 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) if (direntry == NULL) return -EIO; - if (direntry->d_inode == NULL) + if (d_really_is_negative(direntry)) return -EIO; - sb = direntry->d_inode->i_sb; + sb = d_inode(direntry)->i_sb; if (sb == NULL) return -EIO; diff --git a/fs/coda/cache.c b/fs/coda/cache.c index 46ee6f2..5bb630a 100644 --- a/fs/coda/cache.c +++ b/fs/coda/cache.c @@ -94,8 +94,8 @@ static void coda_flag_children(struct dentry *parent, int flag) spin_lock(&parent->d_lock); list_for_each_entry(de, &parent->d_subdirs, d_child) { /* don't know what to do with negative dentries */ - if (de->d_inode ) - coda_flag_inode(de->d_inode, flag); + if (d_inode(de) ) + coda_flag_inode(d_inode(de), flag); } spin_unlock(&parent->d_lock); return; diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 60cb88c..fda9f43 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -201,7 +201,7 @@ err_out: static int coda_link(struct dentry *source_de, struct inode *dir_inode, struct dentry *de) { - struct inode *inode = source_de->d_inode; + struct inode *inode = d_inode(source_de); const char * name = de->d_name.name; int len = de->d_name.len; int error; @@ -266,7 +266,7 @@ static int coda_unlink(struct inode *dir, struct dentry *de) return error; coda_dir_update_mtime(dir); - drop_nlink(de->d_inode); + drop_nlink(d_inode(de)); return 0; } @@ -279,8 +279,8 @@ static int coda_rmdir(struct inode *dir, struct dentry *de) error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len); if (!error) { /* VFS may delete the child */ - if (de->d_inode) - clear_nlink(de->d_inode); + if (d_really_is_positive(de)) + clear_nlink(d_inode(de)); /* fix the link count of the parent */ coda_dir_drop_nlink(dir); @@ -303,14 +303,14 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, coda_i2f(new_dir), old_length, new_length, (const char *) old_name, (const char *)new_name); if (!error) { - if (new_dentry->d_inode) { + if (d_really_is_positive(new_dentry)) { if (d_is_dir(new_dentry)) { coda_dir_drop_nlink(old_dir); coda_dir_inc_nlink(new_dir); } coda_dir_update_mtime(old_dir); coda_dir_update_mtime(new_dir); - coda_flag_inode(new_dentry->d_inode, C_VATTR); + coda_flag_inode(d_inode(new_dentry), C_VATTR); } else { coda_flag_inode(old_dir, C_VATTR); coda_flag_inode(new_dir, C_VATTR); @@ -449,13 +449,13 @@ static int coda_dentry_revalidate(struct dentry *de, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - inode = de->d_inode; + inode = d_inode(de); if (!inode || is_root_inode(inode)) goto out; if (is_bad_inode(inode)) goto bad; - cii = ITOC(de->d_inode); + cii = ITOC(d_inode(de)); if (!(cii->c_flags & (C_PURGE | C_FLUSH))) goto out; @@ -487,11 +487,11 @@ static int coda_dentry_delete(const struct dentry * dentry) { int flags; - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) return 0; - flags = (ITOC(dentry->d_inode)->c_flags) & C_PURGE; - if (is_bad_inode(dentry->d_inode) || flags) { + flags = (ITOC(d_inode(dentry))->c_flags) & C_PURGE; + if (is_bad_inode(d_inode(dentry)) || flags) { return 1; } return 0; diff --git a/fs/coda/file.c b/fs/coda/file.c index d244d74..1da3805 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ 
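The coda_dir_drop_nlink()/coda_dir_inc_nlink() calls in the rmdir and rename hunks above follow the usual convention that every subdirectory contributes one link to its parent through its '..' entry, so moving or removing a directory must adjust the parents' link counts. A tiny sketch using the generic VFS helpers; the function is illustrative:

    #include <linux/fs.h>

    /* Sketch: a subdirectory's ".." link moves with it across parents. */
    static void example_move_subdir(struct inode *old_parent, struct inode *new_parent)
    {
            drop_nlink(old_parent);
            inc_nlink(new_parent);
    }
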
-27,19 +27,14 @@ #include "coda_int.h" static ssize_t -coda_file_read(struct file *coda_file, char __user *buf, size_t count, loff_t *ppos) +coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct coda_file_info *cfi; - struct file *host_file; + struct file *coda_file = iocb->ki_filp; + struct coda_file_info *cfi = CODA_FTOC(coda_file); - cfi = CODA_FTOC(coda_file); BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); - host_file = cfi->cfi_container; - if (!host_file->f_op->read) - return -EINVAL; - - return host_file->f_op->read(host_file, buf, count, ppos); + return vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos); } static ssize_t @@ -64,32 +59,25 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos, } static ssize_t -coda_file_write(struct file *coda_file, const char __user *buf, size_t count, loff_t *ppos) +coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to) { - struct inode *host_inode, *coda_inode = file_inode(coda_file); - struct coda_file_info *cfi; + struct file *coda_file = iocb->ki_filp; + struct inode *coda_inode = file_inode(coda_file); + struct coda_file_info *cfi = CODA_FTOC(coda_file); struct file *host_file; ssize_t ret; - cfi = CODA_FTOC(coda_file); BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); - host_file = cfi->cfi_container; - - if (!host_file->f_op->write) - return -EINVAL; - host_inode = file_inode(host_file); + host_file = cfi->cfi_container; file_start_write(host_file); mutex_lock(&coda_inode->i_mutex); - - ret = host_file->f_op->write(host_file, buf, count, ppos); - - coda_inode->i_size = host_inode->i_size; + ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos); + coda_inode->i_size = file_inode(host_file)->i_size; coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9; coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC; mutex_unlock(&coda_inode->i_mutex); file_end_write(host_file); - return ret; } @@ -231,8 +219,8 @@ int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync) const struct file_operations coda_file_operations = { .llseek = generic_file_llseek, - .read = coda_file_read, - .write = coda_file_write, + .read_iter = coda_file_read_iter, + .write_iter = coda_file_write_iter, .mmap = coda_file_mmap, .open = coda_open, .release = coda_release, diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 82ec68b..cac1390 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -257,15 +257,15 @@ static void coda_evict_inode(struct inode *inode) int coda_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - int err = coda_revalidate_inode(dentry->d_inode); + int err = coda_revalidate_inode(d_inode(dentry)); if (!err) - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(d_inode(dentry), stat); return err; } int coda_setattr(struct dentry *de, struct iattr *iattr) { - struct inode *inode = de->d_inode; + struct inode *inode = d_inode(de); struct coda_vattr vattr; int error; diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index 4326d17..f36a404 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c @@ -72,7 +72,7 @@ static long coda_pioctl(struct file *filp, unsigned int cmd, if (error) return error; - target_inode = path.dentry->d_inode; + target_inode = d_inode(path.dentry); /* return if it is not a Coda inode */ if (target_inode->i_sb != inode->i_sb) { diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index 5bb6e27..9b1ffaa 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -820,8 +820,8 @@ int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs 
*out) case CODA_FLUSH: coda_cache_clear_all(sb); shrink_dcache_sb(sb); - if (sb->s_root->d_inode) - coda_flag_inode(sb->s_root->d_inode, C_FLUSH); + if (d_really_is_positive(sb->s_root)) + coda_flag_inode(d_inode(sb->s_root), C_FLUSH); break; case CODA_PURGEUSER: diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index afec645..6b8e2f0 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -570,6 +570,7 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp) #define BNEPCONNDEL _IOW('B', 201, int) #define BNEPGETCONNLIST _IOR('B', 210, int) #define BNEPGETCONNINFO _IOR('B', 211, int) +#define BNEPGETSUPPFEAT _IOR('B', 212, int) #define CMTPCONNADD _IOW('C', 200, int) #define CMTPCONNDEL _IOW('C', 201, int) @@ -1247,6 +1248,7 @@ COMPATIBLE_IOCTL(BNEPCONNADD) COMPATIBLE_IOCTL(BNEPCONNDEL) COMPATIBLE_IOCTL(BNEPGETCONNLIST) COMPATIBLE_IOCTL(BNEPGETCONNINFO) +COMPATIBLE_IOCTL(BNEPGETSUPPFEAT) COMPATIBLE_IOCTL(CMTPCONNADD) COMPATIBLE_IOCTL(CMTPCONNDEL) COMPATIBLE_IOCTL(CMTPGETCONNLIST) diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index cf0db00..c81ce7f 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -289,7 +289,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry) configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata); error = configfs_create(dentry, mode, init_dir); if (!error) { - inc_nlink(p->d_inode); + inc_nlink(d_inode(p)); item->ci_dentry = dentry; } else { struct configfs_dirent *sd = dentry->d_fsdata; @@ -375,8 +375,8 @@ static void remove_dir(struct dentry * d) list_del_init(&sd->s_sibling); spin_unlock(&configfs_dirent_lock); configfs_put(sd); - if (d->d_inode) - simple_rmdir(parent->d_inode,d); + if (d_really_is_positive(d)) + simple_rmdir(d_inode(parent),d); pr_debug(" o %pd removing done (%d)\n", d, d_count(d)); @@ -513,7 +513,7 @@ static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex /* Abort if racing with mkdir() */ if (sd->s_type & CONFIGFS_USET_IN_MKDIR) { if (wait_mutex) - *wait_mutex = &sd->s_dentry->d_inode->i_mutex; + *wait_mutex = &d_inode(sd->s_dentry)->i_mutex; return -EAGAIN; } @@ -624,13 +624,13 @@ static void detach_groups(struct config_group *group) child = sd->s_dentry; - mutex_lock(&child->d_inode->i_mutex); + mutex_lock(&d_inode(child)->i_mutex); configfs_detach_group(sd->s_element); - child->d_inode->i_flags |= S_DEAD; + d_inode(child)->i_flags |= S_DEAD; dont_mount(child); - mutex_unlock(&child->d_inode->i_mutex); + mutex_unlock(&d_inode(child)->i_mutex); d_delete(child); dput(child); @@ -672,7 +672,7 @@ static int create_default_group(struct config_group *parent_group, sd = child->d_fsdata; sd->s_type |= CONFIGFS_USET_DEFAULT; } else { - BUG_ON(child->d_inode); + BUG_ON(d_inode(child)); d_drop(child); dput(child); } @@ -818,11 +818,11 @@ static int configfs_attach_item(struct config_item *parent_item, * the VFS may already have hit and used them. Thus, * we must lock them as rmdir() would. */ - mutex_lock(&dentry->d_inode->i_mutex); + mutex_lock(&d_inode(dentry)->i_mutex); configfs_remove_dir(item); - dentry->d_inode->i_flags |= S_DEAD; + d_inode(dentry)->i_flags |= S_DEAD; dont_mount(dentry); - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); d_delete(dentry); } } @@ -858,16 +858,16 @@ static int configfs_attach_group(struct config_item *parent_item, * We must also lock the inode to remove it safely in case of * error, as rmdir() would. 
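The fs/coda/file.c hunks above drop the old ->read/->write methods and forward ->read_iter/->write_iter to the container file with vfs_iter_read()/vfs_iter_write(). A minimal sketch of the stacking pattern, assuming the 4.1-era three-argument helpers; the wrapper below is illustrative:

    #include <linux/fs.h>
    #include <linux/uio.h>

    /* Sketch: forward an iov_iter read to an underlying host file. */
    static ssize_t example_stacked_read_iter(struct kiocb *iocb, struct iov_iter *to,
                                             struct file *host_file)
    {
            /*
             * vfs_iter_read() advances the position it is given, so passing
             * &iocb->ki_pos keeps the stacking file's offset in sync.
             */
            return vfs_iter_read(host_file, to, &iocb->ki_pos);
    }
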
*/ - mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); + mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD); configfs_adjust_dir_dirent_depth_before_populate(sd); ret = populate_groups(to_config_group(item)); if (ret) { configfs_detach_item(item); - dentry->d_inode->i_flags |= S_DEAD; + d_inode(dentry)->i_flags |= S_DEAD; dont_mount(dentry); } configfs_adjust_dir_dirent_depth_after_populate(sd); - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); if (ret) d_delete(dentry); } @@ -1075,7 +1075,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys, * subsystem is really registered, and so we need to lock out * configfs_[un]register_subsystem(). */ - mutex_lock(&root->d_inode->i_mutex); + mutex_lock(&d_inode(root)->i_mutex); root_sd = root->d_fsdata; @@ -1111,7 +1111,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys, out_unlock_dirent_lock: spin_unlock(&configfs_dirent_lock); out_unlock_fs: - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); /* * If we succeeded, the fs is pinned via other methods. If not, @@ -1453,11 +1453,11 @@ int configfs_rename_dir(struct config_item * item, const char *new_name) down_write(&configfs_rename_sem); parent = item->parent->dentry; - mutex_lock(&parent->d_inode->i_mutex); + mutex_lock(&d_inode(parent)->i_mutex); new_dentry = lookup_one_len(new_name, parent, strlen(new_name)); if (!IS_ERR(new_dentry)) { - if (!new_dentry->d_inode) { + if (d_really_is_negative(new_dentry)) { error = config_item_set_name(item, "%s", new_name); if (!error) { d_add(new_dentry, NULL); @@ -1469,7 +1469,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name) error = -EEXIST; dput(new_dentry); } - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); up_write(&configfs_rename_sem); return error; @@ -1482,7 +1482,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file) struct configfs_dirent * parent_sd = dentry->d_fsdata; int err; - mutex_lock(&dentry->d_inode->i_mutex); + mutex_lock(&d_inode(dentry)->i_mutex); /* * Fake invisibility if dir belongs to a group/default groups hierarchy * being attached @@ -1495,7 +1495,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file) else err = 0; } - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); return err; } @@ -1505,11 +1505,11 @@ static int configfs_dir_close(struct inode *inode, struct file *file) struct dentry * dentry = file->f_path.dentry; struct configfs_dirent * cursor = file->private_data; - mutex_lock(&dentry->d_inode->i_mutex); + mutex_lock(&d_inode(dentry)->i_mutex); spin_lock(&configfs_dirent_lock); list_del_init(&cursor->s_sibling); spin_unlock(&configfs_dirent_lock); - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); release_configfs_dirent(cursor); @@ -1567,7 +1567,7 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx) spin_lock(&configfs_dirent_lock); dentry = next->s_dentry; if (dentry) - inode = dentry->d_inode; + inode = d_inode(dentry); if (inode) ino = inode->i_ino; spin_unlock(&configfs_dirent_lock); @@ -1590,7 +1590,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) { struct dentry * dentry = file->f_path.dentry; - mutex_lock(&dentry->d_inode->i_mutex); + mutex_lock(&d_inode(dentry)->i_mutex); switch (whence) { case 1: offset += file->f_pos; @@ -1598,7 +1598,7 @@ static loff_t configfs_dir_lseek(struct 
file *file, loff_t offset, int whence) if (offset >= 0) break; default: - mutex_unlock(&file_inode(file)->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); return -EINVAL; } if (offset != file->f_pos) { @@ -1624,7 +1624,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) spin_unlock(&configfs_dirent_lock); } } - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); return offset; } @@ -1654,7 +1654,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) sd = root->d_fsdata; link_group(to_config_group(sd->s_element), group); - mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&d_inode(root)->i_mutex, I_MUTEX_PARENT); err = -ENOMEM; dentry = d_alloc_name(root, group->cg_item.ci_name); @@ -1664,7 +1664,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) err = configfs_attach_group(sd->s_element, &group->cg_item, dentry); if (err) { - BUG_ON(dentry->d_inode); + BUG_ON(d_inode(dentry)); d_drop(dentry); dput(dentry); } else { @@ -1674,7 +1674,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) } } - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); if (err) { unlink_group(group); @@ -1695,9 +1695,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) return; } - mutex_lock_nested(&root->d_inode->i_mutex, + mutex_lock_nested(&d_inode(root)->i_mutex, I_MUTEX_PARENT); - mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); + mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD); mutex_lock(&configfs_symlink_mutex); spin_lock(&configfs_dirent_lock); if (configfs_detach_prep(dentry, NULL)) { @@ -1706,13 +1706,13 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) spin_unlock(&configfs_dirent_lock); mutex_unlock(&configfs_symlink_mutex); configfs_detach_group(&group->cg_item); - dentry->d_inode->i_flags |= S_DEAD; + d_inode(dentry)->i_flags |= S_DEAD; dont_mount(dentry); - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); d_delete(dentry); - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); dput(dentry); diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 56d2cdc..403269f 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -326,10 +326,10 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG; int error = 0; - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_NORMAL); error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, CONFIGFS_ITEM_ATTR); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); return error; } diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 5423a6a..8d89f5f 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -56,7 +56,7 @@ static const struct inode_operations configfs_inode_operations ={ int configfs_setattr(struct dentry * dentry, struct iattr * iattr) { - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); struct configfs_dirent * sd = dentry->d_fsdata; struct iattr * sd_iattr; unsigned int ia_valid = iattr->ia_valid; @@ -186,7 +186,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct in if (!dentry) return -ENOENT; - if (dentry->d_inode) + if (d_really_is_positive(dentry)) return -EEXIST; sd = dentry->d_fsdata; @@ 
-194,7 +194,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct in if (!inode) return -ENOMEM; - p_inode = dentry->d_parent->d_inode; + p_inode = d_inode(dentry->d_parent); p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; configfs_set_inode_lock_class(sd, inode); @@ -236,11 +236,11 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent) if (dentry) { spin_lock(&dentry->d_lock); - if (!d_unhashed(dentry) && dentry->d_inode) { + if (!d_unhashed(dentry) && d_really_is_positive(dentry)) { dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); - simple_unlink(parent->d_inode, dentry); + simple_unlink(d_inode(parent), dentry); } else spin_unlock(&dentry->d_lock); } @@ -251,11 +251,11 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name) struct configfs_dirent * sd; struct configfs_dirent * parent_sd = dir->d_fsdata; - if (dir->d_inode == NULL) + if (d_really_is_negative(dir)) /* no inode means this hasn't been made visible yet */ return; - mutex_lock(&dir->d_inode->i_mutex); + mutex_lock(&d_inode(dir)->i_mutex); list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { if (!sd->s_element) continue; @@ -268,5 +268,5 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name) break; } } - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); } diff --git a/fs/coredump.c b/fs/coredump.c index f319926..bbbe139 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -657,7 +657,7 @@ void do_coredump(const siginfo_t *siginfo) */ if (!uid_eq(inode->i_uid, current_fsuid())) goto close_fail; - if (!cprm.file->f_op->write) + if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) goto close_fail; if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) goto close_fail; @@ -98,9 +98,9 @@ static bool buffer_size_valid(struct buffer_head *bh) return bh->b_state != 0; } -static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, - loff_t start, loff_t end, get_block_t get_block, - struct buffer_head *bh) +static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, + loff_t start, loff_t end, get_block_t get_block, + struct buffer_head *bh) { ssize_t retval = 0; loff_t pos = start; @@ -109,7 +109,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, void *addr; bool hole = false; - if (rw != WRITE) + if (iov_iter_rw(iter) != WRITE) end = min(end, i_size_read(inode)); while (pos < end) { @@ -124,7 +124,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, bh->b_size = PAGE_ALIGN(end - pos); bh->b_state = 0; retval = get_block(inode, block, bh, - rw == WRITE); + iov_iter_rw(iter) == WRITE); if (retval) break; if (!buffer_size_valid(bh)) @@ -137,7 +137,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, bh->b_size -= done; } - hole = (rw != WRITE) && !buffer_written(bh); + hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh); if (hole) { addr = NULL; size = bh->b_size - first; @@ -154,7 +154,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, max = min(pos + size, end); } - if (rw == WRITE) + if (iov_iter_rw(iter) == WRITE) len = copy_from_iter(addr, max - pos, iter); else if (!hole) len = copy_to_iter(addr, max - pos, iter); @@ -173,7 +173,6 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, /** * dax_do_io - Perform I/O to a DAX file - * @rw: READ to read or WRITE to write * @iocb: The control block for this I/O * @inode: The file which the I/O 
is directed at * @iter: The addresses to do I/O from or to @@ -189,9 +188,9 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O * is in progress. */ -ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, - struct iov_iter *iter, loff_t pos, - get_block_t get_block, dio_iodone_t end_io, int flags) +ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode, + struct iov_iter *iter, loff_t pos, get_block_t get_block, + dio_iodone_t end_io, int flags) { struct buffer_head bh; ssize_t retval = -EINVAL; @@ -199,7 +198,7 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, memset(&bh, 0, sizeof(bh)); - if ((flags & DIO_LOCKING) && (rw == READ)) { + if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) { struct address_space *mapping = inode->i_mapping; mutex_lock(&inode->i_mutex); retval = filemap_write_and_wait_range(mapping, pos, end - 1); @@ -210,17 +209,17 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, } /* Protects against truncate */ - atomic_inc(&inode->i_dio_count); + inode_dio_begin(inode); - retval = dax_io(rw, inode, iter, pos, end, get_block, &bh); + retval = dax_io(inode, iter, pos, end, get_block, &bh); - if ((flags & DIO_LOCKING) && (rw == READ)) + if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) mutex_unlock(&inode->i_mutex); if ((retval > 0) && end_io) end_io(iocb, pos, retval, bh.b_private); - inode_dio_done(inode); + inode_dio_end(inode); out: return retval; } @@ -464,6 +463,23 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, EXPORT_SYMBOL_GPL(dax_fault); /** + * dax_pfn_mkwrite - handle first write to DAX page + * @vma: The virtual memory area where the fault occurred + * @vmf: The description of the fault + * + */ +int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct super_block *sb = file_inode(vma->vm_file)->i_sb; + + sb_start_pagefault(sb); + file_update_time(vma->vm_file); + sb_end_pagefault(sb); + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(dax_pfn_mkwrite); + +/** * dax_zero_page_range - zero a range within a page of a DAX file * @inode: The file being truncated * @from: The file offset that is being truncated to diff --git a/fs/dcache.c b/fs/dcache.c index c71e373..656ce52 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -269,6 +269,41 @@ static inline int dname_external(const struct dentry *dentry) return dentry->d_name.name != dentry->d_iname; } +/* + * Make sure other CPUs see the inode attached before the type is set. + */ +static inline void __d_set_inode_and_type(struct dentry *dentry, + struct inode *inode, + unsigned type_flags) +{ + unsigned flags; + + dentry->d_inode = inode; + smp_wmb(); + flags = READ_ONCE(dentry->d_flags); + flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + flags |= type_flags; + WRITE_ONCE(dentry->d_flags, flags); +} + +/* + * Ideally, we want to make sure that other CPUs see the flags cleared before + * the inode is detached, but this is really a violation of RCU principles + * since the ordering suggests we should always set inode before flags. + * + * We should instead replace or discard the entire dentry - but that sucks + * performancewise on mass deletion/rename. 
+ */ +static inline void __d_clear_type_and_inode(struct dentry *dentry) +{ + unsigned flags = READ_ONCE(dentry->d_flags); + + flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + WRITE_ONCE(dentry->d_flags, flags); + smp_wmb(); + dentry->d_inode = NULL; +} + static void dentry_free(struct dentry *dentry) { WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); @@ -311,7 +346,7 @@ static void dentry_iput(struct dentry * dentry) { struct inode *inode = dentry->d_inode; if (inode) { - dentry->d_inode = NULL; + __d_clear_type_and_inode(dentry); hlist_del_init(&dentry->d_u.d_alias); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); @@ -335,8 +370,7 @@ static void dentry_unlink_inode(struct dentry * dentry) __releases(dentry->d_inode->i_lock) { struct inode *inode = dentry->d_inode; - __d_clear_type(dentry); - dentry->d_inode = NULL; + __d_clear_type_and_inode(dentry); hlist_del_init(&dentry->d_u.d_alias); dentry_rcuwalk_barrier(dentry); spin_unlock(&dentry->d_lock); @@ -1715,11 +1749,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) unsigned add_flags = d_flags_for_inode(inode); spin_lock(&dentry->d_lock); - dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); - dentry->d_flags |= add_flags; if (inode) hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); - dentry->d_inode = inode; + __d_set_inode_and_type(dentry, inode, add_flags); dentry_rcuwalk_barrier(dentry); spin_unlock(&dentry->d_lock); fsnotify_d_instantiate(dentry, inode); @@ -1937,8 +1969,7 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) add_flags |= DCACHE_DISCONNECTED; spin_lock(&tmp->d_lock); - tmp->d_inode = inode; - tmp->d_flags |= add_flags; + __d_set_inode_and_type(tmp, inode, add_flags); hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry); hlist_bl_lock(&tmp->d_sb->s_anon); hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); @@ -2690,7 +2721,7 @@ static int __d_unalias(struct inode *inode, struct dentry *dentry, struct dentry *alias) { struct mutex *m1 = NULL, *m2 = NULL; - int ret = -EBUSY; + int ret = -ESTALE; /* If alias and dentry share a parent, then no extra locks required */ if (alias->d_parent == dentry->d_parent) diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 517e649..830a7e7 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -45,7 +45,7 @@ const struct file_operations debugfs_file_operations = { static void *debugfs_follow_link(struct dentry *dentry, struct nameidata *nd) { - nd_set_link(nd, dentry->d_inode->i_private); + nd_set_link(nd, d_inode(dentry)->i_private); return NULL; } diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 96400ab..c1e7ffb 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -46,7 +46,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb) static inline int debugfs_positive(struct dentry *dentry) { - return dentry->d_inode && !d_unhashed(dentry); + return d_really_is_positive(dentry) && !d_unhashed(dentry); } struct debugfs_mount_opts { @@ -124,7 +124,7 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts) static int debugfs_apply_options(struct super_block *sb) { struct debugfs_fs_info *fsi = sb->s_fs_info; - struct inode *inode = sb->s_root->d_inode; + struct inode *inode = d_inode(sb->s_root); struct debugfs_mount_opts *opts = &fsi->mount_opts; inode->i_mode &= ~S_IALLUGO; @@ -188,7 +188,7 @@ static struct vfsmount *debugfs_automount(struct path *path) { struct vfsmount *(*f)(void *); f = (struct vfsmount *(*)(void *))path->dentry->d_fsdata; - return 
f(path->dentry->d_inode->i_private); + return f(d_inode(path->dentry)->i_private); } static const struct dentry_operations debugfs_dops = { @@ -254,6 +254,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent) pr_debug("debugfs: creating file '%s'\n",name); + if (IS_ERR(parent)) + return parent; + error = simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count); if (error) @@ -267,20 +270,20 @@ static struct dentry *start_creating(const char *name, struct dentry *parent) if (!parent) parent = debugfs_mount->mnt_root; - mutex_lock(&parent->d_inode->i_mutex); + mutex_lock(&d_inode(parent)->i_mutex); dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(dentry) && dentry->d_inode) { + if (!IS_ERR(dentry) && d_really_is_positive(dentry)) { dput(dentry); dentry = ERR_PTR(-EEXIST); } if (IS_ERR(dentry)) - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); return dentry; } static struct dentry *failed_creating(struct dentry *dentry) { - mutex_unlock(&dentry->d_parent->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry->d_parent)->i_mutex); dput(dentry); simple_release_fs(&debugfs_mount, &debugfs_mount_count); return NULL; @@ -288,7 +291,7 @@ static struct dentry *failed_creating(struct dentry *dentry) static struct dentry *end_creating(struct dentry *dentry) { - mutex_unlock(&dentry->d_parent->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry->d_parent)->i_mutex); return dentry; } @@ -341,7 +344,7 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, inode->i_fop = fops ? fops : &debugfs_file_operations; inode->i_private = data; d_instantiate(dentry, inode); - fsnotify_create(dentry->d_parent->d_inode, dentry); + fsnotify_create(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_file); @@ -381,7 +384,7 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode, struct dentry *de = debugfs_create_file(name, mode, parent, data, fops); if (de) - de->d_inode->i_size = file_size; + d_inode(de)->i_size = file_size; return de; } EXPORT_SYMBOL_GPL(debugfs_create_file_size); @@ -423,8 +426,8 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ inc_nlink(inode); d_instantiate(dentry, inode); - inc_nlink(dentry->d_parent->d_inode); - fsnotify_mkdir(dentry->d_parent->d_inode, dentry); + inc_nlink(d_inode(dentry->d_parent)); + fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_dir); @@ -521,10 +524,10 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) if (debugfs_positive(dentry)) { dget(dentry); - if (S_ISDIR(dentry->d_inode->i_mode)) - ret = simple_rmdir(parent->d_inode, dentry); + if (d_is_dir(dentry)) + ret = simple_rmdir(d_inode(parent), dentry); else - simple_unlink(parent->d_inode, dentry); + simple_unlink(d_inode(parent), dentry); if (!ret) d_delete(dentry); dput(dentry); @@ -554,12 +557,12 @@ void debugfs_remove(struct dentry *dentry) return; parent = dentry->d_parent; - if (!parent || !parent->d_inode) + if (!parent || d_really_is_negative(parent)) return; - mutex_lock(&parent->d_inode->i_mutex); + mutex_lock(&d_inode(parent)->i_mutex); ret = __debugfs_remove(dentry, parent); - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); if (!ret) simple_release_fs(&debugfs_mount, &debugfs_mount_count); } @@ -585,12 +588,12 @@ void debugfs_remove_recursive(struct dentry *dentry) return; parent = dentry->d_parent; - if (!parent || !parent->d_inode) + if (!parent || d_really_is_negative(parent)) return; parent = dentry; down: - mutex_lock(&parent->d_inode->i_mutex); + mutex_lock(&d_inode(parent)->i_mutex); loop: /* * The parent->d_subdirs is protected by the d_lock. Outside that @@ -605,7 +608,7 @@ void debugfs_remove_recursive(struct dentry *dentry) /* perhaps simple_empty(child) makes more sense */ if (!list_empty(&child->d_subdirs)) { spin_unlock(&parent->d_lock); - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); parent = child; goto down; } @@ -626,10 +629,10 @@ void debugfs_remove_recursive(struct dentry *dentry) } spin_unlock(&parent->d_lock); - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); child = parent; parent = parent->d_parent; - mutex_lock(&parent->d_inode->i_mutex); + mutex_lock(&d_inode(parent)->i_mutex); if (child != dentry) /* go up */ @@ -637,7 +640,7 @@ void debugfs_remove_recursive(struct dentry *dentry) if (!__debugfs_remove(child, parent)) simple_release_fs(&debugfs_mount, &debugfs_mount_count); - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); } EXPORT_SYMBOL_GPL(debugfs_remove_recursive); @@ -669,27 +672,27 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? */ - if (!old_dir->d_inode || !new_dir->d_inode) + if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) goto exit; /* Source does not exist, cyclic rename, or mountpoint? */ - if (!old_dentry->d_inode || old_dentry == trap || + if (d_really_is_negative(old_dentry) || old_dentry == trap || d_mountpoint(old_dentry)) goto exit; dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); /* Lookup failed, cyclic rename or target exists? 
*/ - if (IS_ERR(dentry) || dentry == trap || dentry->d_inode) + if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) goto exit; old_name = fsnotify_oldname_init(old_dentry->d_name.name); - error = simple_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, + error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir), dentry); if (error) { fsnotify_oldname_free(old_name); goto exit; } d_move(old_dentry, dentry); - fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, + fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name, d_is_dir(old_dentry), NULL, old_dentry); fsnotify_oldname_free(old_name); diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index cfe8466..add5663 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -253,7 +253,7 @@ static int mknod_ptmx(struct super_block *sb) if (!uid_valid(root_uid) || !gid_valid(root_gid)) return -EINVAL; - mutex_lock(&root->d_inode->i_mutex); + mutex_lock(&d_inode(root)->i_mutex); /* If we have already created ptmx node, return */ if (fsi->ptmx_dentry) { @@ -290,7 +290,7 @@ static int mknod_ptmx(struct super_block *sb) fsi->ptmx_dentry = dentry; rc = 0; out: - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); return rc; } @@ -298,7 +298,7 @@ static void update_ptmx_mode(struct pts_fs_info *fsi) { struct inode *inode; if (fsi->ptmx_dentry) { - inode = fsi->ptmx_dentry->d_inode; + inode = d_inode(fsi->ptmx_dentry); inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; } } @@ -602,18 +602,18 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, sprintf(s, "%d", index); - mutex_lock(&root->d_inode->i_mutex); + mutex_lock(&d_inode(root)->i_mutex); dentry = d_alloc_name(root, s); if (dentry) { d_add(dentry, inode); - fsnotify_create(root->d_inode, dentry); + fsnotify_create(d_inode(root), dentry); } else { iput(inode); inode = ERR_PTR(-ENOMEM); } - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); return inode; } @@ -658,7 +658,7 @@ void devpts_pty_kill(struct inode *inode) BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - mutex_lock(&root->d_inode->i_mutex); + mutex_lock(&d_inode(root)->i_mutex); dentry = d_find_alias(inode); @@ -667,7 +667,7 @@ void devpts_pty_kill(struct inode *inode) dput(dentry); /* d_alloc_name() in devpts_pty_new() */ dput(dentry); /* d_find_alias above */ - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); } static int __init init_devpts_fs(void) diff --git a/fs/direct-io.c b/fs/direct-io.c index e181b6b..745d234 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -37,7 +37,6 @@ #include <linux/uio.h> #include <linux/atomic.h> #include <linux/prefetch.h> -#include <linux/aio.h> /* * How many user pages to map in one call to get_user_pages(). This determines @@ -254,7 +253,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, if (dio->end_io && dio->result) dio->end_io(dio->iocb, offset, transferred, dio->private); - inode_dio_done(dio->inode); + if (!(dio->flags & DIO_SKIP_DIO_COUNT)) + inode_dio_end(dio->inode); + if (is_async) { if (dio->rw & WRITE) { int err; @@ -265,7 +266,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, ret = err; } - aio_complete(dio->iocb, ret, 0); + dio->iocb->ki_complete(dio->iocb, ret, 0); } kmem_cache_free(dio_cache, dio); @@ -1056,7 +1057,7 @@ static inline int drop_refcount(struct dio *dio) * operation. 
AIO can if it was a broken operation described above or * in fact if all the bios race to complete before we get here. In * that case dio_complete() translates the EIOCBQUEUED into the proper - * return code that the caller will hand to aio_complete(). + * return code that the caller will hand to ->complete(). * * This is managed by the bio_lock instead of being an atomic_t so that * completion paths can drop their ref and use the remaining count to @@ -1094,10 +1095,10 @@ static inline int drop_refcount(struct dio *dio) * for the whole file. */ static inline ssize_t -do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, - struct block_device *bdev, struct iov_iter *iter, loff_t offset, - get_block_t get_block, dio_iodone_t end_io, - dio_submit_t submit_io, int flags) +do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, struct iov_iter *iter, + loff_t offset, get_block_t get_block, dio_iodone_t end_io, + dio_submit_t submit_io, int flags) { unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); unsigned blkbits = i_blkbits; @@ -1111,9 +1112,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct blk_plug plug; unsigned long align = offset | iov_iter_alignment(iter); - if (rw & WRITE) - rw = WRITE_ODIRECT; - /* * Avoid references to bdev if not absolutely needed to give * the early prefetch in the caller enough time. @@ -1128,7 +1126,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, } /* watch out for a 0 len io from a tricksy fs */ - if (rw == READ && !iov_iter_count(iter)) + if (iov_iter_rw(iter) == READ && !iov_iter_count(iter)) return 0; dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); @@ -1144,7 +1142,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, dio->flags = flags; if (dio->flags & DIO_LOCKING) { - if (rw == READ) { + if (iov_iter_rw(iter) == READ) { struct address_space *mapping = iocb->ki_filp->f_mapping; @@ -1170,19 +1168,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, if (is_sync_kiocb(iocb)) dio->is_async = false; else if (!(dio->flags & DIO_ASYNC_EXTEND) && - (rw & WRITE) && end > i_size_read(inode)) + iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) dio->is_async = false; else dio->is_async = true; dio->inode = inode; - dio->rw = rw; + dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ; /* * For AIO O_(D)SYNC writes we need to defer completions to a workqueue * so that we can call ->fsync. */ - if (dio->is_async && (rw & WRITE) && + if (dio->is_async && iov_iter_rw(iter) == WRITE && ((iocb->ki_filp->f_flags & O_DSYNC) || IS_SYNC(iocb->ki_filp->f_mapping->host))) { retval = dio_set_defer_completion(dio); @@ -1199,7 +1197,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, /* * Will be decremented at I/O completion time. */ - atomic_inc(&inode->i_dio_count); + if (!(dio->flags & DIO_SKIP_DIO_COUNT)) + inode_dio_begin(inode); retval = 0; sdio.blkbits = blkbits; @@ -1275,7 +1274,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, * we can let i_mutex go now that its achieved its purpose * of protecting us from looking up uninitialized blocks. 
*/ - if (rw == READ && (dio->flags & DIO_LOCKING)) + if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) mutex_unlock(&dio->inode->i_mutex); /* @@ -1287,7 +1286,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, */ BUG_ON(retval == -EIOCBQUEUED); if (dio->is_async && retval == 0 && dio->result && - (rw == READ || dio->result == count)) + (iov_iter_rw(iter) == READ || dio->result == count)) retval = -EIOCBQUEUED; else dio_await_completion(dio); @@ -1301,11 +1300,11 @@ out: return retval; } -ssize_t -__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, - struct block_device *bdev, struct iov_iter *iter, loff_t offset, - get_block_t get_block, dio_iodone_t end_io, - dio_submit_t submit_io, int flags) +ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, struct iov_iter *iter, + loff_t offset, get_block_t get_block, + dio_iodone_t end_io, dio_submit_t submit_io, + int flags) { /* * The block device state is needed in the end to finally @@ -1319,8 +1318,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, prefetch(bdev->bd_queue); prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); - return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset, - get_block, end_io, submit_io, flags); + return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block, + end_io, submit_io, flags); } EXPORT_SYMBOL(__blockdev_direct_IO); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 719e1ce..97315f2 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -1326,7 +1326,7 @@ static int ecryptfs_read_headers_virt(char *page_virt, if (rc) goto out; if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED)) - ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode); + ecryptfs_i_size_init(page_virt, d_inode(ecryptfs_dentry)); offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset), &bytes_read); @@ -1425,7 +1425,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) { int rc; char *page_virt; - struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode; + struct inode *ecryptfs_inode = d_inode(ecryptfs_dentry); struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 4000f6b..8db0b46 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -54,11 +54,11 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) return -ECHILD; rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); - if (dentry->d_inode) { + if (d_really_is_positive(dentry)) { struct inode *lower_inode = - ecryptfs_inode_to_lower(dentry->d_inode); + ecryptfs_inode_to_lower(d_inode(dentry)); - fsstack_copy_attr_all(dentry->d_inode, lower_inode); + fsstack_copy_attr_all(d_inode(dentry), lower_inode); } return rc; } diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index fd39bad..72afcc6 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -31,7 +31,6 @@ #include <linux/security.h> #include <linux/compat.h> #include <linux/fs_stack.h> -#include <linux/aio.h> #include "ecryptfs_kernel.h" /** @@ -52,12 +51,6 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb, struct file *file = iocb->ki_filp; rc = generic_file_read_iter(iocb, to); - /* - * Even though this is a async interface, we need to wait - * for IO to finish to update atime - */ - if 
(-EIOCBQUEUED == rc) - rc = wait_on_sync_kiocb(iocb); if (rc >= 0) { path = ecryptfs_dentry_to_lower_path(file->f_path.dentry); touch_atime(path); @@ -137,7 +130,7 @@ struct kmem_cache *ecryptfs_file_info_cache; static int read_or_initialize_metadata(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct ecryptfs_crypt_stat *crypt_stat; int rc; @@ -365,9 +358,7 @@ const struct file_operations ecryptfs_dir_fops = { const struct file_operations ecryptfs_main_fops = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = ecryptfs_read_update_atime, - .write = new_sync_write, .write_iter = generic_file_write_iter, .iterate = ecryptfs_readdir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index b08b518..fc850b5 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -41,13 +41,13 @@ static struct dentry *lock_parent(struct dentry *dentry) struct dentry *dir; dir = dget_parent(dentry); - mutex_lock_nested(&(dir->d_inode->i_mutex), I_MUTEX_PARENT); + mutex_lock_nested(&(d_inode(dir)->i_mutex), I_MUTEX_PARENT); return dir; } static void unlock_dir(struct dentry *dir) { - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); dput(dir); } @@ -131,7 +131,7 @@ struct inode *ecryptfs_get_inode(struct inode *lower_inode, static int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, struct super_block *sb) { - struct inode *inode = ecryptfs_get_inode(lower_dentry->d_inode, sb); + struct inode *inode = ecryptfs_get_inode(d_inode(lower_dentry), sb); if (IS_ERR(inode)) return PTR_ERR(inode); @@ -189,21 +189,21 @@ ecryptfs_do_create(struct inode *directory_inode, lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); lower_dir_dentry = lock_parent(lower_dentry); - rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, true); + rc = vfs_create(d_inode(lower_dir_dentry), lower_dentry, mode, true); if (rc) { printk(KERN_ERR "%s: Failure to create dentry in lower fs; " "rc = [%d]\n", __func__, rc); inode = ERR_PTR(rc); goto out_lock; } - inode = __ecryptfs_get_inode(lower_dentry->d_inode, + inode = __ecryptfs_get_inode(d_inode(lower_dentry), directory_inode->i_sb); if (IS_ERR(inode)) { - vfs_unlink(lower_dir_dentry->d_inode, lower_dentry, NULL); + vfs_unlink(d_inode(lower_dir_dentry), lower_dentry, NULL); goto out_lock; } - fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); - fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); + fsstack_copy_attr_times(directory_inode, d_inode(lower_dir_dentry)); + fsstack_copy_inode_size(directory_inode, d_inode(lower_dir_dentry)); out_lock: unlock_dir(lower_dir_dentry); return inode; @@ -332,7 +332,7 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry, struct dentry *lower_dentry, struct inode *dir_inode) { - struct inode *inode, *lower_inode = lower_dentry->d_inode; + struct inode *inode, *lower_inode = d_inode(lower_dentry); struct ecryptfs_dentry_info *dentry_info; struct vfsmount *lower_mnt; int rc = 0; @@ -347,14 +347,14 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry, } lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); - fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode); + fsstack_copy_attr_atime(dir_inode, d_inode(lower_dentry->d_parent)); BUG_ON(!d_count(lower_dentry)); ecryptfs_set_dentry_private(dentry, dentry_info); 
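/*
 * A minimal illustrative sketch (not part of the patches above), assuming
 * <linux/dcache.h> and <linux/fs.h>: the accessor pattern these ecryptfs
 * hunks converge on.  Instead of dereferencing ->d_inode directly, a
 * stacked filesystem reads the lower inode through d_inode() and tests
 * for presence with d_really_is_negative()/d_really_is_positive().
 * example_lower_size() is a hypothetical helper name, used only here.
 */
static loff_t example_lower_size(struct dentry *lower_dentry)
{
	if (d_really_is_negative(lower_dentry))
		return 0;	/* no lower inode attached yet */
	return i_size_read(d_inode(lower_dentry));
}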
dentry_info->lower_path.mnt = lower_mnt; dentry_info->lower_path.dentry = lower_dentry; - if (!lower_dentry->d_inode) { + if (d_really_is_negative(lower_dentry)) { /* We want to add because we couldn't find in lower */ d_add(dentry, NULL); return 0; @@ -400,11 +400,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, int rc = 0; lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); - mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + mutex_lock(&d_inode(lower_dir_dentry)->i_mutex); lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, lower_dir_dentry, ecryptfs_dentry->d_name.len); - mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(lower_dir_dentry)->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " @@ -412,7 +412,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, ecryptfs_dentry); goto out; } - if (lower_dentry->d_inode) + if (d_really_is_positive(lower_dentry)) goto interpose; mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; @@ -429,11 +429,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, "filename; rc = [%d]\n", __func__, rc); goto out; } - mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + mutex_lock(&d_inode(lower_dir_dentry)->i_mutex); lower_dentry = lookup_one_len(encrypted_and_encoded_name, lower_dir_dentry, encrypted_and_encoded_name_size); - mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(lower_dir_dentry)->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " @@ -458,24 +458,24 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir, u64 file_size_save; int rc; - file_size_save = i_size_read(old_dentry->d_inode); + file_size_save = i_size_read(d_inode(old_dentry)); lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); dget(lower_old_dentry); dget(lower_new_dentry); lower_dir_dentry = lock_parent(lower_new_dentry); - rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode, + rc = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry), lower_new_dentry, NULL); - if (rc || !lower_new_dentry->d_inode) + if (rc || d_really_is_negative(lower_new_dentry)) goto out_lock; rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb); if (rc) goto out_lock; - fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); - fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); - set_nlink(old_dentry->d_inode, - ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink); - i_size_write(new_dentry->d_inode, file_size_save); + fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry)); + fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry)); + set_nlink(d_inode(old_dentry), + ecryptfs_inode_to_lower(d_inode(old_dentry))->i_nlink); + i_size_write(d_inode(new_dentry), file_size_save); out_lock: unlock_dir(lower_dir_dentry); dput(lower_new_dentry); @@ -485,7 +485,7 @@ out_lock: static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry) { - return ecryptfs_do_unlink(dir, dentry, dentry->d_inode); + return ecryptfs_do_unlink(dir, dentry, d_inode(dentry)); } static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry, @@ -510,20 +510,20 @@ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry, strlen(symname)); if (rc) goto out_lock; - rc = 
vfs_symlink(lower_dir_dentry->d_inode, lower_dentry, + rc = vfs_symlink(d_inode(lower_dir_dentry), lower_dentry, encoded_symname); kfree(encoded_symname); - if (rc || !lower_dentry->d_inode) + if (rc || d_really_is_negative(lower_dentry)) goto out_lock; rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb); if (rc) goto out_lock; - fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); - fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); + fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry)); + fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry)); out_lock: unlock_dir(lower_dir_dentry); dput(lower_dentry); - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) d_drop(dentry); return rc; } @@ -536,18 +536,18 @@ static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_dir_dentry = lock_parent(lower_dentry); - rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode); - if (rc || !lower_dentry->d_inode) + rc = vfs_mkdir(d_inode(lower_dir_dentry), lower_dentry, mode); + if (rc || d_really_is_negative(lower_dentry)) goto out; rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb); if (rc) goto out; - fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); - fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); - set_nlink(dir, lower_dir_dentry->d_inode->i_nlink); + fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry)); + fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry)); + set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink); out: unlock_dir(lower_dir_dentry); - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) d_drop(dentry); return rc; } @@ -562,12 +562,12 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry) dget(dentry); lower_dir_dentry = lock_parent(lower_dentry); dget(lower_dentry); - rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry); + rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry); dput(lower_dentry); - if (!rc && dentry->d_inode) - clear_nlink(dentry->d_inode); - fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); - set_nlink(dir, lower_dir_dentry->d_inode->i_nlink); + if (!rc && d_really_is_positive(dentry)) + clear_nlink(d_inode(dentry)); + fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry)); + set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink); unlock_dir(lower_dir_dentry); if (!rc) d_drop(dentry); @@ -584,17 +584,17 @@ ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_dir_dentry = lock_parent(lower_dentry); - rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev); - if (rc || !lower_dentry->d_inode) + rc = vfs_mknod(d_inode(lower_dir_dentry), lower_dentry, mode, dev); + if (rc || d_really_is_negative(lower_dentry)) goto out; rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb); if (rc) goto out; - fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); - fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); + fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry)); + fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry)); out: unlock_dir(lower_dir_dentry); - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) d_drop(dentry); return rc; } @@ -617,7 +617,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, dget(lower_new_dentry); lower_old_dir_dentry = dget_parent(lower_old_dentry); lower_new_dir_dentry = dget_parent(lower_new_dentry); - target_inode = new_dentry->d_inode; + target_inode = 
d_inode(new_dentry); trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); /* source should not be ancestor of target */ if (trap == lower_old_dentry) { @@ -629,17 +629,17 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, rc = -ENOTEMPTY; goto out_lock; } - rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry, - lower_new_dir_dentry->d_inode, lower_new_dentry, + rc = vfs_rename(d_inode(lower_old_dir_dentry), lower_old_dentry, + d_inode(lower_new_dir_dentry), lower_new_dentry, NULL, 0); if (rc) goto out_lock; if (target_inode) fsstack_copy_attr_all(target_inode, ecryptfs_inode_to_lower(target_inode)); - fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode); + fsstack_copy_attr_all(new_dir, d_inode(lower_new_dir_dentry)); if (new_dir != old_dir) - fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); + fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry)); out_lock: unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); dput(lower_new_dir_dentry); @@ -662,7 +662,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz) return ERR_PTR(-ENOMEM); old_fs = get_fs(); set_fs(get_ds()); - rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, + rc = d_inode(lower_dentry)->i_op->readlink(lower_dentry, (char __user *)lower_buf, PATH_MAX); set_fs(old_fs); @@ -681,8 +681,8 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) char *buf = ecryptfs_readlink_lower(dentry, &len); if (IS_ERR(buf)) goto out; - fsstack_copy_attr_atime(dentry->d_inode, - ecryptfs_dentry_to_lower(dentry)->d_inode); + fsstack_copy_attr_atime(d_inode(dentry), + d_inode(ecryptfs_dentry_to_lower(dentry))); buf[len] = '\0'; out: nd_set_link(nd, buf); @@ -738,7 +738,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia, struct iattr *lower_ia) { int rc = 0; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ecryptfs_crypt_stat *crypt_stat; loff_t i_size = i_size_read(inode); loff_t lower_size_before_truncate; @@ -751,7 +751,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia, rc = ecryptfs_get_lower_file(dentry, inode); if (rc) return rc; - crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; + crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat; /* Switch on growing or shrinking file */ if (ia->ia_size > i_size) { char zero[] = { 0x00 }; @@ -858,7 +858,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) struct iattr lower_ia = { .ia_valid = 0 }; int rc; - rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length); + rc = ecryptfs_inode_newsize_ok(d_inode(dentry), new_length); if (rc) return rc; @@ -866,9 +866,9 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) if (!rc && lower_ia.ia_valid & ATTR_SIZE) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); - mutex_lock(&lower_dentry->d_inode->i_mutex); + mutex_lock(&d_inode(lower_dentry)->i_mutex); rc = notify_change(lower_dentry, &lower_ia, NULL); - mutex_unlock(&lower_dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(lower_dentry)->i_mutex); } return rc; } @@ -900,10 +900,10 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) struct inode *lower_inode; struct ecryptfs_crypt_stat *crypt_stat; - crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; + crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat; if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) 
ecryptfs_init_crypt_stat(crypt_stat); - inode = dentry->d_inode; + inode = d_inode(dentry); lower_inode = ecryptfs_inode_to_lower(inode); lower_dentry = ecryptfs_dentry_to_lower(dentry); mutex_lock(&crypt_stat->cs_mutex); @@ -967,9 +967,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) lower_ia.ia_valid &= ~ATTR_MODE; - mutex_lock(&lower_dentry->d_inode->i_mutex); + mutex_lock(&d_inode(lower_dentry)->i_mutex); rc = notify_change(lower_dentry, &lower_ia, NULL); - mutex_unlock(&lower_dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(lower_dentry)->i_mutex); out: fsstack_copy_attr_all(inode, lower_inode); return rc; @@ -983,7 +983,7 @@ static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, mount_crypt_stat = &ecryptfs_superblock_to_private( dentry->d_sb)->mount_crypt_stat; - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(d_inode(dentry), stat); if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { char *target; size_t targetsiz; @@ -1007,9 +1007,9 @@ static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat); if (!rc) { - fsstack_copy_attr_all(dentry->d_inode, - ecryptfs_inode_to_lower(dentry->d_inode)); - generic_fillattr(dentry->d_inode, stat); + fsstack_copy_attr_all(d_inode(dentry), + ecryptfs_inode_to_lower(d_inode(dentry))); + generic_fillattr(d_inode(dentry), stat); stat->blocks = lower_stat.blocks; } return rc; @@ -1023,14 +1023,14 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, struct dentry *lower_dentry; lower_dentry = ecryptfs_dentry_to_lower(dentry); - if (!lower_dentry->d_inode->i_op->setxattr) { + if (!d_inode(lower_dentry)->i_op->setxattr) { rc = -EOPNOTSUPP; goto out; } rc = vfs_setxattr(lower_dentry, name, value, size, flags); - if (!rc && dentry->d_inode) - fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode); + if (!rc && d_really_is_positive(dentry)) + fsstack_copy_attr_all(d_inode(dentry), d_inode(lower_dentry)); out: return rc; } @@ -1041,14 +1041,14 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, { int rc = 0; - if (!lower_dentry->d_inode->i_op->getxattr) { + if (!d_inode(lower_dentry)->i_op->getxattr) { rc = -EOPNOTSUPP; goto out; } - mutex_lock(&lower_dentry->d_inode->i_mutex); - rc = lower_dentry->d_inode->i_op->getxattr(lower_dentry, name, value, + mutex_lock(&d_inode(lower_dentry)->i_mutex); + rc = d_inode(lower_dentry)->i_op->getxattr(lower_dentry, name, value, size); - mutex_unlock(&lower_dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(lower_dentry)->i_mutex); out: return rc; } @@ -1068,13 +1068,13 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size) struct dentry *lower_dentry; lower_dentry = ecryptfs_dentry_to_lower(dentry); - if (!lower_dentry->d_inode->i_op->listxattr) { + if (!d_inode(lower_dentry)->i_op->listxattr) { rc = -EOPNOTSUPP; goto out; } - mutex_lock(&lower_dentry->d_inode->i_mutex); - rc = lower_dentry->d_inode->i_op->listxattr(lower_dentry, list, size); - mutex_unlock(&lower_dentry->d_inode->i_mutex); + mutex_lock(&d_inode(lower_dentry)->i_mutex); + rc = d_inode(lower_dentry)->i_op->listxattr(lower_dentry, list, size); + mutex_unlock(&d_inode(lower_dentry)->i_mutex); out: return rc; } @@ -1085,13 +1085,13 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name) struct dentry *lower_dentry; lower_dentry = 
ecryptfs_dentry_to_lower(dentry); - if (!lower_dentry->d_inode->i_op->removexattr) { + if (!d_inode(lower_dentry)->i_op->removexattr) { rc = -EOPNOTSUPP; goto out; } - mutex_lock(&lower_dentry->d_inode->i_mutex); - rc = lower_dentry->d_inode->i_op->removexattr(lower_dentry, name); - mutex_unlock(&lower_dentry->d_inode->i_mutex); + mutex_lock(&d_inode(lower_dentry)->i_mutex); + rc = d_inode(lower_dentry)->i_op->removexattr(lower_dentry, name); + mutex_unlock(&d_inode(lower_dentry)->i_mutex); out: return rc; } diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c index f1ea610..866bb18 100644 --- a/fs/ecryptfs/kthread.c +++ b/fs/ecryptfs/kthread.c @@ -144,7 +144,7 @@ int ecryptfs_privileged_open(struct file **lower_file, /* Corresponding dput() and mntput() are done when the * lower file is fput() when all eCryptfs files for the inode are * released. */ - flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR; + flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; (*lower_file) = dentry_open(&req.path, flags, cred); if (!IS_ERR(*lower_file)) goto out; diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index c095d32..4f4d047 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -546,11 +546,11 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out_free; } - if (check_ruid && !uid_eq(path.dentry->d_inode->i_uid, current_uid())) { + if (check_ruid && !uid_eq(d_inode(path.dentry)->i_uid, current_uid())) { rc = -EPERM; printk(KERN_ERR "Mount of device (uid: %d) not owned by " "requested user (uid: %d)\n", - i_uid_read(path.dentry->d_inode), + i_uid_read(d_inode(path.dentry)), from_kuid(&init_user_ns, current_uid())); goto out_free; } @@ -584,7 +584,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out_free; } - inode = ecryptfs_get_inode(path.dentry->d_inode, s); + inode = ecryptfs_get_inode(d_inode(path.dentry), s); rc = PTR_ERR(inode); if (IS_ERR(inode)) goto out_free; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 4626976..cf20852 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -420,7 +420,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode) void *xattr_virt; struct dentry *lower_dentry = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_path.dentry; - struct inode *lower_inode = lower_dentry->d_inode; + struct inode *lower_inode = d_inode(lower_dentry); int rc; if (!lower_inode->i_op->getxattr || !lower_inode->i_op->setxattr) { diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c index 07ab497..3381b9d 100644 --- a/fs/efivarfs/inode.c +++ b/fs/efivarfs/inode.c @@ -145,12 +145,12 @@ out: static int efivarfs_unlink(struct inode *dir, struct dentry *dentry) { - struct efivar_entry *var = dentry->d_inode->i_private; + struct efivar_entry *var = d_inode(dentry)->i_private; if (efivar_entry_delete(var)) return -EINVAL; - drop_nlink(dentry->d_inode); + drop_nlink(d_inode(dentry)); dput(dentry); return 0; }; diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index ddbce42..59fedbc 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -144,7 +144,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; - inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0); + inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0); if (!inode) goto fail_name; diff --git a/fs/efs/namei.c b/fs/efs/namei.c index bbee8f0..40ba9cc 100644 --- a/fs/efs/namei.c +++ 
b/fs/efs/namei.c @@ -111,9 +111,9 @@ struct dentry *efs_get_parent(struct dentry *child) struct dentry *parent = ERR_PTR(-ENOENT); efs_ino_t ino; - ino = efs_find_entry(child->d_inode, "..", 2); + ino = efs_find_entry(d_inode(child), "..", 2); if (ino) - parent = d_obtain_alias(efs_iget(child->d_inode->i_sb, ino)); + parent = d_obtain_alias(efs_iget(d_inode(child)->i_sb, ino)); return parent; } @@ -926,10 +926,14 @@ static int de_thread(struct task_struct *tsk) if (!thread_group_leader(tsk)) { struct task_struct *leader = tsk->group_leader; - sig->notify_count = -1; /* for exit_notify() */ for (;;) { threadgroup_change_begin(tsk); write_lock_irq(&tasklist_lock); + /* + * Do this under tasklist_lock to ensure that + * exit_notify() can't miss ->group_exit_task + */ + sig->notify_count = -1; if (likely(leader->exit_state)) break; __set_current_state(TASK_KILLABLE); @@ -1078,7 +1082,13 @@ int flush_old_exec(struct linux_binprm * bprm) if (retval) goto out; + /* + * Must be called _before_ exec_mmap() as bprm->mm is + * not visibile until then. This also enables the update + * to be lockless. + */ set_mm_exe_file(bprm->mm, bprm->file); + /* * Release all of the old mmap stuff */ @@ -1265,6 +1275,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm) spin_unlock(&p->fs->lock); } +static void bprm_fill_uid(struct linux_binprm *bprm) +{ + struct inode *inode; + unsigned int mode; + kuid_t uid; + kgid_t gid; + + /* clear any previous set[ug]id data from a previous binary */ + bprm->cred->euid = current_euid(); + bprm->cred->egid = current_egid(); + + if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) + return; + + if (task_no_new_privs(current)) + return; + + inode = file_inode(bprm->file); + mode = READ_ONCE(inode->i_mode); + if (!(mode & (S_ISUID|S_ISGID))) + return; + + /* Be careful if suid/sgid is set */ + mutex_lock(&inode->i_mutex); + + /* reload atomically mode/uid/gid now that lock held */ + mode = inode->i_mode; + uid = inode->i_uid; + gid = inode->i_gid; + mutex_unlock(&inode->i_mutex); + + /* We ignore suid/sgid if there are no mappings for them in the ns */ + if (!kuid_has_mapping(bprm->cred->user_ns, uid) || + !kgid_has_mapping(bprm->cred->user_ns, gid)) + return; + + if (mode & S_ISUID) { + bprm->per_clear |= PER_CLEAR_ON_SETID; + bprm->cred->euid = uid; + } + + if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { + bprm->per_clear |= PER_CLEAR_ON_SETID; + bprm->cred->egid = gid; + } +} + /* * Fill the binprm structure from the inode. * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes @@ -1273,36 +1330,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm) */ int prepare_binprm(struct linux_binprm *bprm) { - struct inode *inode = file_inode(bprm->file); - umode_t mode = inode->i_mode; int retval; - - /* clear any previous set[ug]id data from a previous binary */ - bprm->cred->euid = current_euid(); - bprm->cred->egid = current_egid(); - - if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) && - !task_no_new_privs(current) && - kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) && - kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) { - /* Set-uid? */ - if (mode & S_ISUID) { - bprm->per_clear |= PER_CLEAR_ON_SETID; - bprm->cred->euid = inode->i_uid; - } - - /* Set-gid? */ - /* - * If setgid is set but no group execute bit then this - * is a candidate for mandatory locking, not a setgid - * executable. 
- */ - if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { - bprm->per_clear |= PER_CLEAR_ON_SETID; - bprm->cred->egid = inode->i_gid; - } - } + bprm_fill_uid(bprm); /* fill in binprm security blob */ retval = security_bprm_set_creds(bprm); diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index d7defd5..4deb0b0 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -379,7 +379,7 @@ ino_t exofs_parent_ino(struct dentry *child) struct exofs_dir_entry *de; ino_t ino; - de = exofs_dotdot(child->d_inode, &page); + de = exofs_dotdot(d_inode(child), &page); if (!de) return 0; @@ -429,7 +429,7 @@ int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de, int exofs_add_link(struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned chunk_size = exofs_chunk_size(dir); diff --git a/fs/exofs/file.c b/fs/exofs/file.c index 1a376b4..906de66 100644 --- a/fs/exofs/file.c +++ b/fs/exofs/file.c @@ -67,8 +67,6 @@ static int exofs_flush(struct file *file, fl_owner_t id) const struct file_operations exofs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index a198e94..786e4cc 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -963,8 +963,8 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset, /* TODO: Should be easy enough to do proprly */ -static ssize_t exofs_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { return 0; } @@ -1028,7 +1028,7 @@ static int _do_truncate(struct inode *inode, loff_t newsize) */ int exofs_setattr(struct dentry *dentry, struct iattr *iattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; /* if we are about to modify an object, and it hasn't been diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 2890746..5ae25e4 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -141,7 +141,7 @@ out_fail: static int exofs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); inode->i_ctime = CURRENT_TIME; inode_inc_link_count(inode); @@ -191,7 +191,7 @@ out_dir: static int exofs_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct exofs_dir_entry *de; struct page *page; int err = -ENOENT; @@ -213,7 +213,7 @@ out: static int exofs_rmdir(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int err = -ENOTEMPTY; if (exofs_empty_dir(inode)) { @@ -230,8 +230,8 @@ static int exofs_rmdir(struct inode *dir, struct dentry *dentry) static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct page *dir_page = NULL; struct exofs_dir_entry *dir_de = NULL; struct page *old_page; diff --git 
a/fs/exofs/super.c b/fs/exofs/super.c index fcc2e56..b795c56 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -958,7 +958,7 @@ static struct dentry *exofs_get_parent(struct dentry *child) if (!ino) return ERR_PTR(-ESTALE); - return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino)); + return d_obtain_alias(exofs_iget(d_inode(child)->i_sb, ino)); } static struct inode *exofs_nfs_get_inode(struct super_block *sb, diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c index 832e262..6f6f3a4 100644 --- a/fs/exofs/symlink.c +++ b/fs/exofs/symlink.c @@ -37,7 +37,7 @@ static void *exofs_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct exofs_i_info *oi = exofs_i(dentry->d_inode); + struct exofs_i_info *oi = exofs_i(d_inode(dentry)); nd_set_link(nd, (char *)oi->i_data); return NULL; diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 6e1d4ab..796b491 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -486,7 +486,7 @@ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de, */ int ext2_add_link (struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned chunk_size = ext2_chunk_size(dir); diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 678f9ab..8d15feb 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -793,7 +793,6 @@ extern int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync); extern const struct inode_operations ext2_file_inode_operations; extern const struct file_operations ext2_file_operations; -extern const struct file_operations ext2_dax_file_operations; /* inode.c */ extern const struct address_space_operations ext2_aops; diff --git a/fs/ext2/file.c b/fs/ext2/file.c index e317017..3a0a6c6 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -39,6 +39,7 @@ static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) static const struct vm_operations_struct ext2_dax_vm_ops = { .fault = ext2_dax_fault, .page_mkwrite = ext2_dax_mkwrite, + .pfn_mkwrite = dax_pfn_mkwrite, }; static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) @@ -92,8 +93,6 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync) */ const struct file_operations ext2_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = ext2_ioctl, @@ -108,24 +107,6 @@ const struct file_operations ext2_file_operations = { .splice_write = iter_file_splice_write, }; -#ifdef CONFIG_FS_DAX -const struct file_operations ext2_dax_file_operations = { - .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, - .read_iter = generic_file_read_iter, - .write_iter = generic_file_write_iter, - .unlocked_ioctl = ext2_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = ext2_compat_ioctl, -#endif - .mmap = ext2_file_mmap, - .open = dquot_file_open, - .release = ext2_release_file, - .fsync = ext2_fsync, -}; -#endif - const struct inode_operations ext2_file_inode_operations = { #ifdef CONFIG_EXT2_FS_XATTR .setxattr = generic_setxattr, diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 6c14bb8..5c04a0d 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -278,7 +278,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent) avefreeb = free_blocks / ngroups; ndirs = 
percpu_counter_read_positive(&sbi->s_dirs_counter); - if ((parent == sb->s_root->d_inode) || + if ((parent == d_inode(sb->s_root)) || (EXT2_I(parent)->i_flags & EXT2_TOPDIR_FL)) { struct ext2_group_desc *best_desc = NULL; int best_ndir = inodes_per_group; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 6434bc0..f460ae3 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -31,7 +31,7 @@ #include <linux/mpage.h> #include <linux/fiemap.h> #include <linux/namei.h> -#include <linux/aio.h> +#include <linux/uio.h> #include "ext2.h" #include "acl.h" #include "xattr.h" @@ -851,8 +851,7 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block) } static ssize_t -ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -861,12 +860,12 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, ssize_t ret; if (IS_DAX(inode)) - ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block, - NULL, DIO_LOCKING); + ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block, NULL, + DIO_LOCKING); else - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, + ret = blockdev_direct_IO(iocb, inode, iter, offset, ext2_get_block); - if (ret < 0 && (rw & WRITE)) + if (ret < 0 && iov_iter_rw(iter) == WRITE) ext2_write_failed(mapping, offset + count); return ret; } @@ -1388,10 +1387,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) if (S_ISREG(inode->i_mode)) { inode->i_op = &ext2_file_inode_operations; - if (test_opt(inode->i_sb, DAX)) { - inode->i_mapping->a_ops = &ext2_aops; - inode->i_fop = &ext2_dax_file_operations; - } else if (test_opt(inode->i_sb, NOBH)) { + if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; inode->i_fop = &ext2_file_operations; } else { @@ -1548,7 +1544,7 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc) int ext2_setattr(struct dentry *dentry, struct iattr *iattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; error = inode_change_ok(inode, iattr); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 148f6e3..3e074a9 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -79,10 +79,10 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, uns struct dentry *ext2_get_parent(struct dentry *child) { struct qstr dotdot = QSTR_INIT("..", 2); - unsigned long ino = ext2_inode_by_name(child->d_inode, &dotdot); + unsigned long ino = ext2_inode_by_name(d_inode(child), &dotdot); if (!ino) return ERR_PTR(-ENOENT); - return d_obtain_alias(ext2_iget(child->d_inode->i_sb, ino)); + return d_obtain_alias(ext2_iget(d_inode(child)->i_sb, ino)); } /* @@ -104,10 +104,7 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode return PTR_ERR(inode); inode->i_op = &ext2_file_inode_operations; - if (test_opt(inode->i_sb, DAX)) { - inode->i_mapping->a_ops = &ext2_aops; - inode->i_fop = &ext2_dax_file_operations; - } else if (test_opt(inode->i_sb, NOBH)) { + if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; inode->i_fop = &ext2_file_operations; } else { @@ -125,10 +122,7 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) return PTR_ERR(inode); inode->i_op = &ext2_file_inode_operations; - if (test_opt(inode->i_sb, DAX)) { - inode->i_mapping->a_ops = &ext2_aops; - 
inode->i_fop = &ext2_dax_file_operations; - } else if (test_opt(inode->i_sb, NOBH)) { + if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; inode->i_fop = &ext2_file_operations; } else { @@ -214,7 +208,7 @@ out_fail: static int ext2_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); int err; dquot_initialize(dir); @@ -281,7 +275,7 @@ out_dir: static int ext2_unlink(struct inode * dir, struct dentry *dentry) { - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); struct ext2_dir_entry_2 * de; struct page * page; int err = -ENOENT; @@ -305,7 +299,7 @@ out: static int ext2_rmdir (struct inode * dir, struct dentry *dentry) { - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); int err = -ENOTEMPTY; if (ext2_empty_dir(inode)) { @@ -322,8 +316,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry) static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, struct inode * new_dir, struct dentry * new_dentry ) { - struct inode * old_inode = old_dentry->d_inode; - struct inode * new_inode = new_dentry->d_inode; + struct inode * old_inode = d_inode(old_dentry); + struct inode * new_inode = d_inode(new_dentry); struct page * dir_page = NULL; struct ext2_dir_entry_2 * dir_de = NULL; struct page * old_page; diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c index 565cf81..20608f1 100644 --- a/fs/ext2/symlink.c +++ b/fs/ext2/symlink.c @@ -23,7 +23,7 @@ static void *ext2_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct ext2_inode_info *ei = EXT2_I(dentry->d_inode); + struct ext2_inode_info *ei = EXT2_I(d_inode(dentry)); nd_set_link(nd, (char *)ei->i_data); return NULL; } diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 9142614..0b6bfd3 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -243,7 +243,7 @@ cleanup: static int ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct buffer_head *bh = NULL; struct ext2_xattr_entry *entry; char *end; @@ -319,7 +319,7 @@ cleanup: /* * Inode operation listxattr() * - * dentry->d_inode->i_mutex: don't care + * d_inode(dentry)->i_mutex: don't care */ ssize_t ext2_listxattr(struct dentry *dentry, char *buffer, size_t size) diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c index c0ebc4d..702fc68 100644 --- a/fs/ext2/xattr_security.c +++ b/fs/ext2/xattr_security.c @@ -28,7 +28,7 @@ ext2_xattr_security_get(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name, + return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name, buffer, size); } @@ -38,7 +38,7 @@ ext2_xattr_security_set(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name, + return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name, value, size, flags); } diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c index 7e19257..42b6e98 100644 --- a/fs/ext2/xattr_trusted.c +++ b/fs/ext2/xattr_trusted.c @@ -32,7 +32,7 @@ ext2_xattr_trusted_get(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name, 
+ return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name, buffer, size); } @@ -42,7 +42,7 @@ ext2_xattr_trusted_set(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name, + return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name, value, size, flags); } diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c index f470e44..ecdc460 100644 --- a/fs/ext2/xattr_user.c +++ b/fs/ext2/xattr_user.c @@ -36,7 +36,7 @@ ext2_xattr_user_get(struct dentry *dentry, const char *name, return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_USER, + return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_USER, name, buffer, size); } @@ -49,7 +49,7 @@ ext2_xattr_user_set(struct dentry *dentry, const char *name, if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_USER, + return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_USER, name, value, size, flags); } diff --git a/fs/ext3/file.c b/fs/ext3/file.c index a062fa1..3b8f650 100644 --- a/fs/ext3/file.c +++ b/fs/ext3/file.c @@ -50,8 +50,6 @@ static int ext3_release_file (struct inode * inode, struct file * filp) const struct file_operations ext3_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = ext3_ioctl, diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index a1b8102..3ad242e 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -210,7 +210,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent) avefreeb = freeb / ngroups; ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); - if ((parent == sb->s_root->d_inode) || + if ((parent == d_inode(sb->s_root)) || (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) { int best_ndir = inodes_per_group; int best_group = -1; diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 2c6ccc4..2ee2dc4 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -27,7 +27,7 @@ #include <linux/writeback.h> #include <linux/mpage.h> #include <linux/namei.h> -#include <linux/aio.h> +#include <linux/uio.h> #include "ext3.h" #include "xattr.h" #include "acl.h" @@ -1820,8 +1820,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait) * crashes then stale disk data _may_ be exposed inside the file. But current * VFS code falls back into buffered path in that case so we are safe. 
*/ -static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -1832,9 +1832,9 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_iter_count(iter); int retries = 0; - trace_ext3_direct_IO_enter(inode, offset, count, rw); + trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); - if (rw == WRITE) { + if (iov_iter_rw(iter) == WRITE) { loff_t final_size = offset + count; if (final_size > inode->i_size) { @@ -1856,12 +1856,12 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, } retry: - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. */ - if (unlikely((rw & WRITE) && ret < 0)) { + if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = offset + count; @@ -1908,7 +1908,7 @@ retry: ret = err; } out: - trace_ext3_direct_IO_exit(inode, offset, count, rw, ret); + trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); return ret; } @@ -3240,7 +3240,7 @@ int ext3_write_inode(struct inode *inode, struct writeback_control *wbc) */ int ext3_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error, rc = 0; const unsigned int ia_valid = attr->ia_valid; diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index f197736..4264b9b 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1049,19 +1049,19 @@ struct dentry *ext3_get_parent(struct dentry *child) struct ext3_dir_entry_2 * de; struct buffer_head *bh; - bh = ext3_find_entry(child->d_inode, &dotdot, &de); + bh = ext3_find_entry(d_inode(child), &dotdot, &de); if (!bh) return ERR_PTR(-ENOENT); ino = le32_to_cpu(de->inode); brelse(bh); - if (!ext3_valid_inum(child->d_inode->i_sb, ino)) { - ext3_error(child->d_inode->i_sb, "ext3_get_parent", + if (!ext3_valid_inum(d_inode(child)->i_sb, ino)) { + ext3_error(d_inode(child)->i_sb, "ext3_get_parent", "bad inode number: %lu", ino); return ERR_PTR(-EIO); } - return d_obtain_alias(ext3_iget(child->d_inode->i_sb, ino)); + return d_obtain_alias(ext3_iget(d_inode(child)->i_sb, ino)); } #define S_SHIFT 12 @@ -1243,7 +1243,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, struct inode *inode, struct ext3_dir_entry_2 *de, struct buffer_head * bh) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned long offset = 0; @@ -1330,7 +1330,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct inode *inode, struct buffer_head *bh) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct buffer_head *bh2; @@ -1435,7 +1435,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, static int ext3_add_entry (handle_t *handle, struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode 
*dir = d_inode(dentry->d_parent); struct buffer_head * bh; struct ext3_dir_entry_2 *de; struct super_block * sb; @@ -1489,7 +1489,7 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, struct dx_entry *entries, *at; struct dx_hash_info hinfo; struct buffer_head * bh; - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); struct super_block * sb = dir->i_sb; struct ext3_dir_entry_2 *de; int err; @@ -2111,7 +2111,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go in * separate transaction */ dquot_initialize(dir); - dquot_initialize(dentry->d_inode); + dquot_initialize(d_inode(dentry)); handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) @@ -2125,7 +2125,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry) if (IS_DIRSYNC(dir)) handle->h_sync = 1; - inode = dentry->d_inode; + inode = d_inode(dentry); retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) @@ -2173,7 +2173,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go * in separate transaction */ dquot_initialize(dir); - dquot_initialize(dentry->d_inode); + dquot_initialize(d_inode(dentry)); handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) @@ -2187,7 +2187,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry) if (!bh) goto end_unlink; - inode = dentry->d_inode; + inode = d_inode(dentry); retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) @@ -2328,7 +2328,7 @@ static int ext3_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry) { handle_t *handle; - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); int err, retries = 0; if (inode->i_nlink >= EXT3_LINK_MAX) @@ -2391,8 +2391,8 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, /* Initialize quotas before so that eventual writes go * in separate transaction */ - if (new_dentry->d_inode) - dquot_initialize(new_dentry->d_inode); + if (d_really_is_positive(new_dentry)) + dquot_initialize(d_inode(new_dentry)); handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); @@ -2409,12 +2409,12 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, * and merrily kill the link to whatever was created under the * same name. 
Goodbye sticky bit ;-< */ - old_inode = old_dentry->d_inode; + old_inode = d_inode(old_dentry); retval = -ENOENT; if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino) goto end_rename; - new_inode = new_dentry->d_inode; + new_inode = d_inode(new_dentry); new_bh = ext3_find_entry(new_dir, &new_dentry->d_name, &new_de); if (new_bh) { if (!new_inode) { diff --git a/fs/ext3/super.c b/fs/ext3/super.c index d4dbf3c..a9312f0 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -789,7 +789,7 @@ static const struct quotactl_ops ext3_qctl_operations = { .quota_on = ext3_quota_on, .quota_off = dquot_quota_off, .quota_sync = dquot_quota_sync, - .get_info = dquot_get_dqinfo, + .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk @@ -1170,7 +1170,7 @@ static int parse_options (char *options, struct super_block *sb, return 0; } - journal_inode = path.dentry->d_inode; + journal_inode = d_inode(path.dentry); if (!S_ISBLK(journal_inode->i_mode)) { ext3_msg(sb, KERN_ERR, "error: journal path %s " "is not a block device", journal_path); @@ -2947,7 +2947,7 @@ static int ext3_write_info(struct super_block *sb, int type) handle_t *handle; /* Data block + inode block */ - handle = ext3_journal_start(sb->s_root->d_inode, 2); + handle = ext3_journal_start(d_inode(sb->s_root), 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); @@ -2994,7 +2994,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id, * When we journal data on quota file, we have to flush journal to see * all updates to the file when we bypass pagecache... */ - if (ext3_should_journal_data(path->dentry->d_inode)) { + if (ext3_should_journal_data(d_inode(path->dentry))) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... 
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c index 6b01c3e..ea96df3 100644 --- a/fs/ext3/symlink.c +++ b/fs/ext3/symlink.c @@ -23,7 +23,7 @@ static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct ext3_inode_info *ei = EXT3_I(dentry->d_inode); + struct ext3_inode_info *ei = EXT3_I(d_inode(dentry)); nd_set_link(nd, (char*)ei->i_data); return NULL; } diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index c6874be..7cf3650 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c @@ -137,7 +137,7 @@ ext3_xattr_handler(int name_index) /* * Inode operation listxattr() * - * dentry->d_inode->i_mutex: don't care + * d_inode(dentry)->i_mutex: don't care */ ssize_t ext3_listxattr(struct dentry *dentry, char *buffer, size_t size) @@ -355,7 +355,7 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry, static int ext3_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct buffer_head *bh = NULL; int error; @@ -391,7 +391,7 @@ cleanup: static int ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ext3_xattr_ibody_header *header; struct ext3_inode *raw_inode; struct ext3_iloc iloc; @@ -432,7 +432,7 @@ ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { int i_error, b_error; - down_read(&EXT3_I(dentry->d_inode)->xattr_sem); + down_read(&EXT3_I(d_inode(dentry))->xattr_sem); i_error = ext3_xattr_ibody_list(dentry, buffer, buffer_size); if (i_error < 0) { b_error = 0; @@ -445,7 +445,7 @@ ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) if (b_error < 0) i_error = 0; } - up_read(&EXT3_I(dentry->d_inode)->xattr_sem); + up_read(&EXT3_I(d_inode(dentry))->xattr_sem); return i_error + b_error; } @@ -546,8 +546,7 @@ ext3_xattr_set_entry(struct ext3_xattr_info *i, struct ext3_xattr_search *s) free += EXT3_XATTR_LEN(name_len); } if (i->value) { - if (free < EXT3_XATTR_SIZE(i->value_len) || - free < EXT3_XATTR_LEN(name_len) + + if (free < EXT3_XATTR_LEN(name_len) + EXT3_XATTR_SIZE(i->value_len)) return -ENOSPC; } diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c index 722c2bf..c9506d5 100644 --- a/fs/ext3/xattr_security.c +++ b/fs/ext3/xattr_security.c @@ -29,7 +29,7 @@ ext3_xattr_security_get(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY, + return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY, name, buffer, size); } @@ -39,7 +39,7 @@ ext3_xattr_security_set(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY, + return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY, name, value, size, flags); } diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c index d75727c..206cc66 100644 --- a/fs/ext3/xattr_trusted.c +++ b/fs/ext3/xattr_trusted.c @@ -32,7 +32,7 @@ ext3_xattr_trusted_get(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_TRUSTED, + return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED, name, buffer, size); } @@ -42,7 +42,7 @@ ext3_xattr_trusted_set(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - 
return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_TRUSTED, name, + return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED, name, value, size, flags); } diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c index 5612af3..021508a 100644 --- a/fs/ext3/xattr_user.c +++ b/fs/ext3/xattr_user.c @@ -34,7 +34,7 @@ ext3_xattr_user_get(struct dentry *dentry, const char *name, void *buffer, return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_USER, + return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_USER, name, buffer, size); } @@ -46,7 +46,7 @@ ext3_xattr_user_set(struct dentry *dentry, const char *name, return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_USER, + return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_USER, name, value, size, flags); } diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index efea5d5..18228c2 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -64,6 +64,23 @@ config EXT4_FS_SECURITY If you are not using a security module that requires using extended attributes for file security labels, say N. +config EXT4_FS_ENCRYPTION + bool "Ext4 Encryption" + depends on EXT4_FS + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_XTS + select CRYPTO_CTS + select CRYPTO_SHA256 + select KEYS + select ENCRYPTED_KEYS + help + Enable encryption of ext4 files and directories. This + feature is similar to ecryptfs, but it is more memory + efficient since it avoids caching the encrypted and + decrypted pages in the page cache. + config EXT4_DEBUG bool "EXT4 debugging support" depends on EXT4_FS diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index 0310fec..75285ea 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile @@ -8,7 +8,9 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \ ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \ mmp.o indirect.o extents_status.o xattr.o xattr_user.o \ - xattr_trusted.o inline.o + xattr_trusted.o inline.o readpage.o ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o +ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o \ + crypto_key.o crypto_fname.o diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index d40c8db..69b1e73 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -4,11 +4,6 @@ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> */ -#include <linux/init.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/capability.h> -#include <linux/fs.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 83a6f49..955bf49 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -14,7 +14,6 @@ #include <linux/time.h> #include <linux/capability.h> #include <linux/fs.h> -#include <linux/jbd2.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include "ext4.h" @@ -641,8 +640,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, * fail EDQUOT for metdata, but we do account for it. 
*/ if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) { - spin_lock(&EXT4_I(inode)->i_block_reservation_lock); - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); dquot_alloc_block_nofail(inode, EXT4_C2B(EXT4_SB(inode->i_sb), ar.len)); } diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c index b610779..4a606afb 100644 --- a/fs/ext4/bitmap.c +++ b/fs/ext4/bitmap.c @@ -8,7 +8,6 @@ */ #include <linux/buffer_head.h> -#include <linux/jbd2.h> #include "ext4.h" unsigned int ext4_count_free(char *bitmap, unsigned int numchars) diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 41eb9dc..3522340 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -16,7 +16,6 @@ #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/blkdev.h> -#include <linux/mutex.h> #include <linux/slab.h> #include "ext4.h" diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c new file mode 100644 index 0000000..8ff1527 --- /dev/null +++ b/fs/ext4/crypto.c @@ -0,0 +1,558 @@ +/* + * linux/fs/ext4/crypto.c + * + * Copyright (C) 2015, Google, Inc. + * + * This contains encryption functions for ext4 + * + * Written by Michael Halcrow, 2014. + * + * Filename encryption additions + * Uday Savagaonkar, 2014 + * Encryption policy handling additions + * Ildar Muslukhov, 2014 + * + * This has not yet undergone a rigorous security audit. + * + * The usage of AES-XTS should conform to recommendations in NIST + * Special Publication 800-38E and IEEE P1619/D16. + */ + +#include <crypto/hash.h> +#include <crypto/sha.h> +#include <keys/user-type.h> +#include <keys/encrypted-type.h> +#include <linux/crypto.h> +#include <linux/ecryptfs.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/key.h> +#include <linux/list.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/spinlock_types.h> + +#include "ext4_extents.h" +#include "xattr.h" + +/* Encryption added and removed here! (L: */ + +static unsigned int num_prealloc_crypto_pages = 32; +static unsigned int num_prealloc_crypto_ctxs = 128; + +module_param(num_prealloc_crypto_pages, uint, 0444); +MODULE_PARM_DESC(num_prealloc_crypto_pages, + "Number of crypto pages to preallocate"); +module_param(num_prealloc_crypto_ctxs, uint, 0444); +MODULE_PARM_DESC(num_prealloc_crypto_ctxs, + "Number of crypto contexts to preallocate"); + +static mempool_t *ext4_bounce_page_pool; + +static LIST_HEAD(ext4_free_crypto_ctxs); +static DEFINE_SPINLOCK(ext4_crypto_ctx_lock); + +/** + * ext4_release_crypto_ctx() - Releases an encryption context + * @ctx: The encryption context to release. + * + * If the encryption context was allocated from the pre-allocated pool, returns + * it to that pool. Else, frees it. + * + * If there's a bounce page in the context, this frees that. 
+ */ +void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx) +{ + unsigned long flags; + + if (ctx->bounce_page) { + if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) + __free_page(ctx->bounce_page); + else + mempool_free(ctx->bounce_page, ext4_bounce_page_pool); + ctx->bounce_page = NULL; + } + ctx->control_page = NULL; + if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) { + if (ctx->tfm) + crypto_free_tfm(ctx->tfm); + kfree(ctx); + } else { + spin_lock_irqsave(&ext4_crypto_ctx_lock, flags); + list_add(&ctx->free_list, &ext4_free_crypto_ctxs); + spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); + } +} + +/** + * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context + * @mask: The allocation mask. + * + * Return: An allocated and initialized encryption context on success. An error + * value or NULL otherwise. + */ +static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask) +{ + struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx), + mask); + + if (!ctx) + return ERR_PTR(-ENOMEM); + return ctx; +} + +/** + * ext4_get_crypto_ctx() - Gets an encryption context + * @inode: The inode for which we are doing the crypto + * + * Allocates and initializes an encryption context. + * + * Return: An allocated and initialized encryption context on success; error + * value or NULL otherwise. + */ +struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) +{ + struct ext4_crypto_ctx *ctx = NULL; + int res = 0; + unsigned long flags; + struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key; + + if (!ext4_read_workqueue) + ext4_init_crypto(); + + /* + * We first try getting the ctx from a free list because in + * the common case the ctx will have an allocated and + * initialized crypto tfm, so it's probably a worthwhile + * optimization. For the bounce page, we first try getting it + * from the kernel allocator because that's just about as fast + * as getting it from a list and because a cache of free pages + * should generally be a "last resort" option for a filesystem + * to be able to do its job. + */ + spin_lock_irqsave(&ext4_crypto_ctx_lock, flags); + ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs, + struct ext4_crypto_ctx, free_list); + if (ctx) + list_del(&ctx->free_list); + spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); + if (!ctx) { + ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS); + if (IS_ERR(ctx)) { + res = PTR_ERR(ctx); + goto out; + } + ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; + } else { + ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; + } + + /* Allocate a new Crypto API context if we don't already have + * one or if it isn't the right mode. */ + BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID); + if (ctx->tfm && (ctx->mode != key->mode)) { + crypto_free_tfm(ctx->tfm); + ctx->tfm = NULL; + ctx->mode = EXT4_ENCRYPTION_MODE_INVALID; + } + if (!ctx->tfm) { + switch (key->mode) { + case EXT4_ENCRYPTION_MODE_AES_256_XTS: + ctx->tfm = crypto_ablkcipher_tfm( + crypto_alloc_ablkcipher("xts(aes)", 0, 0)); + break; + case EXT4_ENCRYPTION_MODE_AES_256_GCM: + /* TODO(mhalcrow): AEAD w/ gcm(aes); + * crypto_aead_setauthsize() */ + ctx->tfm = ERR_PTR(-ENOTSUPP); + break; + default: + BUG(); + } + if (IS_ERR_OR_NULL(ctx->tfm)) { + res = PTR_ERR(ctx->tfm); + ctx->tfm = NULL; + goto out; + } + ctx->mode = key->mode; + } + BUG_ON(key->size != ext4_encryption_key_size(key->mode)); + + /* There shouldn't be a bounce page attached to the crypto + * context at this point. 
*/ + BUG_ON(ctx->bounce_page); + +out: + if (res) { + if (!IS_ERR_OR_NULL(ctx)) + ext4_release_crypto_ctx(ctx); + ctx = ERR_PTR(res); + } + return ctx; +} + +struct workqueue_struct *ext4_read_workqueue; +static DEFINE_MUTEX(crypto_init); + +/** + * ext4_exit_crypto() - Shutdown the ext4 encryption system + */ +void ext4_exit_crypto(void) +{ + struct ext4_crypto_ctx *pos, *n; + + list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) { + if (pos->bounce_page) { + if (pos->flags & + EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) { + __free_page(pos->bounce_page); + } else { + mempool_free(pos->bounce_page, + ext4_bounce_page_pool); + } + } + if (pos->tfm) + crypto_free_tfm(pos->tfm); + kfree(pos); + } + INIT_LIST_HEAD(&ext4_free_crypto_ctxs); + if (ext4_bounce_page_pool) + mempool_destroy(ext4_bounce_page_pool); + ext4_bounce_page_pool = NULL; + if (ext4_read_workqueue) + destroy_workqueue(ext4_read_workqueue); + ext4_read_workqueue = NULL; +} + +/** + * ext4_init_crypto() - Set up for ext4 encryption. + * + * We only call this when we start accessing encrypted files, since it + * results in memory getting allocated that wouldn't otherwise be used. + * + * Return: Zero on success, non-zero otherwise. + */ +int ext4_init_crypto(void) +{ + int i, res; + + mutex_lock(&crypto_init); + if (ext4_read_workqueue) + goto already_initialized; + ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0); + if (!ext4_read_workqueue) { + res = -ENOMEM; + goto fail; + } + + for (i = 0; i < num_prealloc_crypto_ctxs; i++) { + struct ext4_crypto_ctx *ctx; + + ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL); + if (IS_ERR(ctx)) { + res = PTR_ERR(ctx); + goto fail; + } + list_add(&ctx->free_list, &ext4_free_crypto_ctxs); + } + + ext4_bounce_page_pool = + mempool_create_page_pool(num_prealloc_crypto_pages, 0); + if (!ext4_bounce_page_pool) { + res = -ENOMEM; + goto fail; + } +already_initialized: + mutex_unlock(&crypto_init); + return 0; +fail: + ext4_exit_crypto(); + mutex_unlock(&crypto_init); + return res; +} + +void ext4_restore_control_page(struct page *data_page) +{ + struct ext4_crypto_ctx *ctx = + (struct ext4_crypto_ctx *)page_private(data_page); + + set_page_private(data_page, (unsigned long)NULL); + ClearPagePrivate(data_page); + unlock_page(data_page); + ext4_release_crypto_ctx(ctx); +} + +/** + * ext4_crypt_complete() - The completion callback for page encryption + * @req: The asynchronous encryption request context + * @res: The result of the encryption operation + */ +static void ext4_crypt_complete(struct crypto_async_request *req, int res) +{ + struct ext4_completion_result *ecr = req->data; + + if (res == -EINPROGRESS) + return; + ecr->res = res; + complete(&ecr->completion); +} + +typedef enum { + EXT4_DECRYPT = 0, + EXT4_ENCRYPT, +} ext4_direction_t; + +static int ext4_page_crypto(struct ext4_crypto_ctx *ctx, + struct inode *inode, + ext4_direction_t rw, + pgoff_t index, + struct page *src_page, + struct page *dest_page) + +{ + u8 xts_tweak[EXT4_XTS_TWEAK_SIZE]; + struct ablkcipher_request *req = NULL; + DECLARE_EXT4_COMPLETION_RESULT(ecr); + struct scatterlist dst, src; + struct ext4_inode_info *ei = EXT4_I(inode); + struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm); + int res = 0; + + BUG_ON(!ctx->tfm); + BUG_ON(ctx->mode != ei->i_encryption_key.mode); + + if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) { + printk_ratelimited(KERN_ERR + "%s: unsupported crypto algorithm: %d\n", + __func__, ctx->mode); + return -ENOTSUPP; + } + + 
crypto_ablkcipher_clear_flags(atfm, ~0); + crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY); + + res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw, + ei->i_encryption_key.size); + if (res) { + printk_ratelimited(KERN_ERR + "%s: crypto_ablkcipher_setkey() failed\n", + __func__); + return res; + } + req = ablkcipher_request_alloc(atfm, GFP_NOFS); + if (!req) { + printk_ratelimited(KERN_ERR + "%s: crypto_request_alloc() failed\n", + __func__); + return -ENOMEM; + } + ablkcipher_request_set_callback( + req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + ext4_crypt_complete, &ecr); + + BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index)); + memcpy(xts_tweak, &index, sizeof(index)); + memset(&xts_tweak[sizeof(index)], 0, + EXT4_XTS_TWEAK_SIZE - sizeof(index)); + + sg_init_table(&dst, 1); + sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); + sg_init_table(&src, 1); + sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); + ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, + xts_tweak); + if (rw == EXT4_DECRYPT) + res = crypto_ablkcipher_decrypt(req); + else + res = crypto_ablkcipher_encrypt(req); + if (res == -EINPROGRESS || res == -EBUSY) { + BUG_ON(req->base.data != &ecr); + wait_for_completion(&ecr.completion); + res = ecr.res; + } + ablkcipher_request_free(req); + if (res) { + printk_ratelimited( + KERN_ERR + "%s: crypto_ablkcipher_encrypt() returned %d\n", + __func__, res); + return res; + } + return 0; +} + +/** + * ext4_encrypt() - Encrypts a page + * @inode: The inode for which the encryption should take place + * @plaintext_page: The page to encrypt. Must be locked. + * + * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx + * encryption context. + * + * Called on the page write path. The caller must call + * ext4_restore_control_page() on the returned ciphertext page to + * release the bounce buffer and the encryption context. + * + * Return: An allocated page with the encrypted content on success. Else, an + * error value or NULL. + */ +struct page *ext4_encrypt(struct inode *inode, + struct page *plaintext_page) +{ + struct ext4_crypto_ctx *ctx; + struct page *ciphertext_page = NULL; + int err; + + BUG_ON(!PageLocked(plaintext_page)); + + ctx = ext4_get_crypto_ctx(inode); + if (IS_ERR(ctx)) + return (struct page *) ctx; + + /* The encryption operation will require a bounce page. */ + ciphertext_page = alloc_page(GFP_NOFS); + if (!ciphertext_page) { + /* This is a potential bottleneck, but at least we'll have + * forward progress. */ + ciphertext_page = mempool_alloc(ext4_bounce_page_pool, + GFP_NOFS); + if (WARN_ON_ONCE(!ciphertext_page)) { + ciphertext_page = mempool_alloc(ext4_bounce_page_pool, + GFP_NOFS | __GFP_WAIT); + } + ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; + } else { + ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; + } + ctx->bounce_page = ciphertext_page; + ctx->control_page = plaintext_page; + err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index, + plaintext_page, ciphertext_page); + if (err) { + ext4_release_crypto_ctx(ctx); + return ERR_PTR(err); + } + SetPagePrivate(ciphertext_page); + set_page_private(ciphertext_page, (unsigned long)ctx); + lock_page(ciphertext_page); + return ciphertext_page; +} + +/** + * ext4_decrypt() - Decrypts a page in-place + * @ctx: The encryption context. + * @page: The page to decrypt. Must be locked. + * + * Decrypts page in-place using the ctx encryption context. + * + * Called from the read completion callback. 
+ * + * Return: Zero on success, non-zero otherwise. + */ +int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page) +{ + BUG_ON(!PageLocked(page)); + + return ext4_page_crypto(ctx, page->mapping->host, + EXT4_DECRYPT, page->index, page, page); +} + +/* + * Convenience function which takes care of allocating and + * deallocating the encryption context + */ +int ext4_decrypt_one(struct inode *inode, struct page *page) +{ + int ret; + + struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode); + + if (!ctx) + return -ENOMEM; + ret = ext4_decrypt(ctx, page); + ext4_release_crypto_ctx(ctx); + return ret; +} + +int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex) +{ + struct ext4_crypto_ctx *ctx; + struct page *ciphertext_page = NULL; + struct bio *bio; + ext4_lblk_t lblk = ex->ee_block; + ext4_fsblk_t pblk = ext4_ext_pblock(ex); + unsigned int len = ext4_ext_get_actual_len(ex); + int err = 0; + + BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); + + ctx = ext4_get_crypto_ctx(inode); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ciphertext_page = alloc_page(GFP_NOFS); + if (!ciphertext_page) { + /* This is a potential bottleneck, but at least we'll have + * forward progress. */ + ciphertext_page = mempool_alloc(ext4_bounce_page_pool, + GFP_NOFS); + if (WARN_ON_ONCE(!ciphertext_page)) { + ciphertext_page = mempool_alloc(ext4_bounce_page_pool, + GFP_NOFS | __GFP_WAIT); + } + ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; + } else { + ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; + } + ctx->bounce_page = ciphertext_page; + + while (len--) { + err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk, + ZERO_PAGE(0), ciphertext_page); + if (err) + goto errout; + + bio = bio_alloc(GFP_KERNEL, 1); + if (!bio) { + err = -ENOMEM; + goto errout; + } + bio->bi_bdev = inode->i_sb->s_bdev; + bio->bi_iter.bi_sector = pblk; + err = bio_add_page(bio, ciphertext_page, + inode->i_sb->s_blocksize, 0); + if (err) { + bio_put(bio); + goto errout; + } + err = submit_bio_wait(WRITE, bio); + if (err) + goto errout; + } + err = 0; +errout: + ext4_release_crypto_ctx(ctx); + return err; +} + +bool ext4_valid_contents_enc_mode(uint32_t mode) +{ + return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS); +} + +/** + * ext4_validate_encryption_key_size() - Validate the encryption key size + * @mode: The key mode. + * @size: The key size to validate. + * + * Return: The validated key size for @mode. Zero if invalid. + */ +uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size) +{ + if (size == ext4_encryption_key_size(mode)) + return size; + return 0; +} diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c new file mode 100644 index 0000000..ca2f594 --- /dev/null +++ b/fs/ext4/crypto_fname.c @@ -0,0 +1,709 @@ +/* + * linux/fs/ext4/crypto_fname.c + * + * Copyright (C) 2015, Google, Inc. + * + * This contains functions for filename crypto management in ext4 + * + * Written by Uday Savagaonkar, 2014. + * + * This has not yet undergone a rigorous security audit. 
+ * + */ + +#include <crypto/hash.h> +#include <crypto/sha.h> +#include <keys/encrypted-type.h> +#include <keys/user-type.h> +#include <linux/crypto.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/key.h> +#include <linux/key.h> +#include <linux/list.h> +#include <linux/mempool.h> +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/spinlock_types.h> + +#include "ext4.h" +#include "ext4_crypto.h" +#include "xattr.h" + +/** + * ext4_dir_crypt_complete() - + */ +static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res) +{ + struct ext4_completion_result *ecr = req->data; + + if (res == -EINPROGRESS) + return; + ecr->res = res; + complete(&ecr->completion); +} + +bool ext4_valid_filenames_enc_mode(uint32_t mode) +{ + return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS); +} + +/** + * ext4_fname_encrypt() - + * + * This function encrypts the input filename, and returns the length of the + * ciphertext. Errors are returned as negative numbers. We trust the caller to + * allocate sufficient memory to oname string. + */ +static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, + const struct qstr *iname, + struct ext4_str *oname) +{ + u32 ciphertext_len; + struct ablkcipher_request *req = NULL; + DECLARE_EXT4_COMPLETION_RESULT(ecr); + struct crypto_ablkcipher *tfm = ctx->ctfm; + int res = 0; + char iv[EXT4_CRYPTO_BLOCK_SIZE]; + struct scatterlist sg[1]; + char *workbuf; + + if (iname->len <= 0 || iname->len > ctx->lim) + return -EIO; + + ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ? + EXT4_CRYPTO_BLOCK_SIZE : iname->len; + ciphertext_len = (ciphertext_len > ctx->lim) + ? ctx->lim : ciphertext_len; + + /* Allocate request */ + req = ablkcipher_request_alloc(tfm, GFP_NOFS); + if (!req) { + printk_ratelimited( + KERN_ERR "%s: crypto_request_alloc() failed\n", __func__); + return -ENOMEM; + } + ablkcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + ext4_dir_crypt_complete, &ecr); + + /* Map the workpage */ + workbuf = kmap(ctx->workpage); + + /* Copy the input */ + memcpy(workbuf, iname->name, iname->len); + if (iname->len < ciphertext_len) + memset(workbuf + iname->len, 0, ciphertext_len - iname->len); + + /* Initialize IV */ + memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE); + + /* Create encryption request */ + sg_init_table(sg, 1); + sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); + ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv); + res = crypto_ablkcipher_encrypt(req); + if (res == -EINPROGRESS || res == -EBUSY) { + BUG_ON(req->base.data != &ecr); + wait_for_completion(&ecr.completion); + res = ecr.res; + } + if (res >= 0) { + /* Copy the result to output */ + memcpy(oname->name, workbuf, ciphertext_len); + res = ciphertext_len; + } + kunmap(ctx->workpage); + ablkcipher_request_free(req); + if (res < 0) { + printk_ratelimited( + KERN_ERR "%s: Error (error code %d)\n", __func__, res); + } + oname->len = ciphertext_len; + return res; +} + +/* + * ext4_fname_decrypt() + * This function decrypts the input filename, and returns + * the length of the plaintext. + * Errors are returned as negative numbers. + * We trust the caller to allocate sufficient memory to oname string. 
+ */ +static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_str *iname, + struct ext4_str *oname) +{ + struct ext4_str tmp_in[2], tmp_out[1]; + struct ablkcipher_request *req = NULL; + DECLARE_EXT4_COMPLETION_RESULT(ecr); + struct scatterlist sg[1]; + struct crypto_ablkcipher *tfm = ctx->ctfm; + int res = 0; + char iv[EXT4_CRYPTO_BLOCK_SIZE]; + char *workbuf; + + if (iname->len <= 0 || iname->len > ctx->lim) + return -EIO; + + tmp_in[0].name = iname->name; + tmp_in[0].len = iname->len; + tmp_out[0].name = oname->name; + + /* Allocate request */ + req = ablkcipher_request_alloc(tfm, GFP_NOFS); + if (!req) { + printk_ratelimited( + KERN_ERR "%s: crypto_request_alloc() failed\n", __func__); + return -ENOMEM; + } + ablkcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + ext4_dir_crypt_complete, &ecr); + + /* Map the workpage */ + workbuf = kmap(ctx->workpage); + + /* Copy the input */ + memcpy(workbuf, iname->name, iname->len); + + /* Initialize IV */ + memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE); + + /* Create encryption request */ + sg_init_table(sg, 1); + sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); + ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv); + res = crypto_ablkcipher_decrypt(req); + if (res == -EINPROGRESS || res == -EBUSY) { + BUG_ON(req->base.data != &ecr); + wait_for_completion(&ecr.completion); + res = ecr.res; + } + if (res >= 0) { + /* Copy the result to output */ + memcpy(oname->name, workbuf, iname->len); + res = iname->len; + } + kunmap(ctx->workpage); + ablkcipher_request_free(req); + if (res < 0) { + printk_ratelimited( + KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n", + __func__, res); + return res; + } + + oname->len = strnlen(oname->name, iname->len); + return oname->len; +} + +/** + * ext4_fname_encode_digest() - + * + * Encodes the input digest using characters from the set [a-zA-Z0-9_+]. + * The encoded string is roughly 4/3 times the size of the input string. + */ +int ext4_fname_encode_digest(char *dst, char *src, u32 len) +{ + static const char *lookup_table = + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+"; + u32 current_chunk, num_chunks, i; + char tmp_buf[3]; + u32 c0, c1, c2, c3; + + current_chunk = 0; + num_chunks = len/3; + for (i = 0; i < num_chunks; i++) { + c0 = src[3*i] & 0x3f; + c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f; + c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f; + c3 = (src[3*i+2]>>2) & 0x3f; + dst[4*i] = lookup_table[c0]; + dst[4*i+1] = lookup_table[c1]; + dst[4*i+2] = lookup_table[c2]; + dst[4*i+3] = lookup_table[c3]; + } + if (i*3 < len) { + memset(tmp_buf, 0, 3); + memcpy(tmp_buf, &src[3*i], len-3*i); + c0 = tmp_buf[0] & 0x3f; + c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f; + c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f; + c3 = (tmp_buf[2]>>2) & 0x3f; + dst[4*i] = lookup_table[c0]; + dst[4*i+1] = lookup_table[c1]; + dst[4*i+2] = lookup_table[c2]; + dst[4*i+3] = lookup_table[c3]; + i++; + } + return (i * 4); +} + +/** + * ext4_fname_hash() - + * + * This function computes the hash of the input filename, and sets the output + * buffer to the *encoded* digest. It returns the length of the digest as its + * return value. Errors are returned as negative numbers. We trust the caller + * to allocate sufficient memory to oname string. 
+ */ +static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_str *iname, + struct ext4_str *oname) +{ + struct scatterlist sg; + struct hash_desc desc = { + .tfm = (struct crypto_hash *)ctx->htfm, + .flags = CRYPTO_TFM_REQ_MAY_SLEEP + }; + int res = 0; + + if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { + res = ext4_fname_encode_digest(oname->name, iname->name, + iname->len); + oname->len = res; + return res; + } + + sg_init_one(&sg, iname->name, iname->len); + res = crypto_hash_init(&desc); + if (res) { + printk(KERN_ERR + "%s: Error initializing crypto hash; res = [%d]\n", + __func__, res); + goto out; + } + res = crypto_hash_update(&desc, &sg, iname->len); + if (res) { + printk(KERN_ERR + "%s: Error updating crypto hash; res = [%d]\n", + __func__, res); + goto out; + } + res = crypto_hash_final(&desc, + &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]); + if (res) { + printk(KERN_ERR + "%s: Error finalizing crypto hash; res = [%d]\n", + __func__, res); + goto out; + } + /* Encode the digest as a printable string--this will increase the + * size of the digest */ + oname->name[0] = 'I'; + res = ext4_fname_encode_digest(oname->name+1, + &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE], + EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1; + oname->len = res; +out: + return res; +} + +/** + * ext4_free_fname_crypto_ctx() - + * + * Frees up a crypto context. + */ +void ext4_free_fname_crypto_ctx(struct ext4_fname_crypto_ctx *ctx) +{ + if (ctx == NULL || IS_ERR(ctx)) + return; + + if (ctx->ctfm && !IS_ERR(ctx->ctfm)) + crypto_free_ablkcipher(ctx->ctfm); + if (ctx->htfm && !IS_ERR(ctx->htfm)) + crypto_free_hash(ctx->htfm); + if (ctx->workpage && !IS_ERR(ctx->workpage)) + __free_page(ctx->workpage); + kfree(ctx); +} + +/** + * ext4_put_fname_crypto_ctx() - + * + * Return: The crypto context onto free list. If the free list is above a + * threshold, completely frees up the context, and returns the memory. + * + * TODO: Currently we directly free the crypto context. Eventually we should + * add code it to return to free list. Such an approach will increase + * efficiency of directory lookup. + */ +void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx) +{ + if (*ctx == NULL || IS_ERR(*ctx)) + return; + ext4_free_fname_crypto_ctx(*ctx); + *ctx = NULL; +} + +/** + * ext4_search_fname_crypto_ctx() - + */ +static struct ext4_fname_crypto_ctx *ext4_search_fname_crypto_ctx( + const struct ext4_encryption_key *key) +{ + return NULL; +} + +/** + * ext4_alloc_fname_crypto_ctx() - + */ +struct ext4_fname_crypto_ctx *ext4_alloc_fname_crypto_ctx( + const struct ext4_encryption_key *key) +{ + struct ext4_fname_crypto_ctx *ctx; + + ctx = kmalloc(sizeof(struct ext4_fname_crypto_ctx), GFP_NOFS); + if (ctx == NULL) + return ERR_PTR(-ENOMEM); + if (key->mode == EXT4_ENCRYPTION_MODE_INVALID) { + /* This will automatically set key mode to invalid + * As enum for ENCRYPTION_MODE_INVALID is zero */ + memset(&ctx->key, 0, sizeof(ctx->key)); + } else { + memcpy(&ctx->key, key, sizeof(struct ext4_encryption_key)); + } + ctx->has_valid_key = (EXT4_ENCRYPTION_MODE_INVALID == key->mode) + ? 0 : 1; + ctx->ctfm_key_is_ready = 0; + ctx->ctfm = NULL; + ctx->htfm = NULL; + ctx->workpage = NULL; + return ctx; +} + +/** + * ext4_get_fname_crypto_ctx() - + * + * Allocates a free crypto context and initializes it to hold + * the crypto material for the inode. + * + * Return: NULL if not encrypted. Error value on error. Valid pointer otherwise. 
+ */ +struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx( + struct inode *inode, u32 max_ciphertext_len) +{ + struct ext4_fname_crypto_ctx *ctx; + struct ext4_inode_info *ei = EXT4_I(inode); + int res; + + /* Check if the crypto policy is set on the inode */ + res = ext4_encrypted_inode(inode); + if (res == 0) + return NULL; + + if (!ext4_has_encryption_key(inode)) + ext4_generate_encryption_key(inode); + + /* Get a crypto context based on the key. + * A new context is allocated if no context matches the requested key. + */ + ctx = ext4_search_fname_crypto_ctx(&(ei->i_encryption_key)); + if (ctx == NULL) + ctx = ext4_alloc_fname_crypto_ctx(&(ei->i_encryption_key)); + if (IS_ERR(ctx)) + return ctx; + + if (ctx->has_valid_key) { + if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) { + printk_once(KERN_WARNING + "ext4: unsupported key mode %d\n", + ctx->key.mode); + return ERR_PTR(-ENOKEY); + } + + /* As a first cut, we will allocate new tfm in every call. + * later, we will keep the tfm around, in case the key gets + * re-used */ + if (ctx->ctfm == NULL) { + ctx->ctfm = crypto_alloc_ablkcipher("cts(cbc(aes))", + 0, 0); + } + if (IS_ERR(ctx->ctfm)) { + res = PTR_ERR(ctx->ctfm); + printk( + KERN_DEBUG "%s: error (%d) allocating crypto tfm\n", + __func__, res); + ctx->ctfm = NULL; + ext4_put_fname_crypto_ctx(&ctx); + return ERR_PTR(res); + } + if (ctx->ctfm == NULL) { + printk( + KERN_DEBUG "%s: could not allocate crypto tfm\n", + __func__); + ext4_put_fname_crypto_ctx(&ctx); + return ERR_PTR(-ENOMEM); + } + if (ctx->workpage == NULL) + ctx->workpage = alloc_page(GFP_NOFS); + if (IS_ERR(ctx->workpage)) { + res = PTR_ERR(ctx->workpage); + printk( + KERN_DEBUG "%s: error (%d) allocating work page\n", + __func__, res); + ctx->workpage = NULL; + ext4_put_fname_crypto_ctx(&ctx); + return ERR_PTR(res); + } + if (ctx->workpage == NULL) { + printk( + KERN_DEBUG "%s: could not allocate work page\n", + __func__); + ext4_put_fname_crypto_ctx(&ctx); + return ERR_PTR(-ENOMEM); + } + ctx->lim = max_ciphertext_len; + crypto_ablkcipher_clear_flags(ctx->ctfm, ~0); + crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctx->ctfm), + CRYPTO_TFM_REQ_WEAK_KEY); + + /* If we are lucky, we will get a context that is already + * set up with the right key. Else, we will have to + * set the key */ + if (!ctx->ctfm_key_is_ready) { + /* Since our crypto objectives for filename encryption + * are pretty weak, + * we directly use the inode master key */ + res = crypto_ablkcipher_setkey(ctx->ctfm, + ctx->key.raw, ctx->key.size); + if (res) { + ext4_put_fname_crypto_ctx(&ctx); + return ERR_PTR(-EIO); + } + ctx->ctfm_key_is_ready = 1; + } else { + /* In the current implementation, key should never be + * marked "ready" for a context that has just been + * allocated. 
So we should never reach here */ + BUG(); + } + } + if (ctx->htfm == NULL) + ctx->htfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(ctx->htfm)) { + res = PTR_ERR(ctx->htfm); + printk(KERN_DEBUG "%s: error (%d) allocating hash tfm\n", + __func__, res); + ctx->htfm = NULL; + ext4_put_fname_crypto_ctx(&ctx); + return ERR_PTR(res); + } + if (ctx->htfm == NULL) { + printk(KERN_DEBUG "%s: could not allocate hash tfm\n", + __func__); + ext4_put_fname_crypto_ctx(&ctx); + return ERR_PTR(-ENOMEM); + } + + return ctx; +} + +/** + * ext4_fname_crypto_round_up() - + * + * Return: The next multiple of block size + */ +u32 ext4_fname_crypto_round_up(u32 size, u32 blksize) +{ + return ((size+blksize-1)/blksize)*blksize; +} + +/** + * ext4_fname_crypto_namelen_on_disk() - + */ +int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, + u32 namelen) +{ + u32 ciphertext_len; + + if (ctx == NULL) + return -EIO; + if (!(ctx->has_valid_key)) + return -EACCES; + ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ? + EXT4_CRYPTO_BLOCK_SIZE : namelen; + ciphertext_len = (ciphertext_len > ctx->lim) + ? ctx->lim : ciphertext_len; + return (int) ciphertext_len; +} + +/** + * ext4_fname_crypto_alloc_obuff() - + * + * Allocates an output buffer that is sufficient for the crypto operation + * specified by the context and the direction. + */ +int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, + u32 ilen, struct ext4_str *crypto_str) +{ + unsigned int olen; + + if (!ctx) + return -EIO; + olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE); + crypto_str->len = olen; + if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) + olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2; + /* Allocated buffer can hold one more character to null-terminate the + * string */ + crypto_str->name = kmalloc(olen+1, GFP_NOFS); + if (!(crypto_str->name)) + return -ENOMEM; + return 0; +} + +/** + * ext4_fname_crypto_free_buffer() - + * + * Frees the buffer allocated for crypto operation. + */ +void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str) +{ + if (!crypto_str) + return; + kfree(crypto_str->name); + crypto_str->name = NULL; +} + +/** + * ext4_fname_disk_to_usr() - converts a filename from disk space to user space + */ +int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_str *iname, + struct ext4_str *oname) +{ + if (ctx == NULL) + return -EIO; + if (iname->len < 3) { + /*Check for . and .. */ + if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') { + oname->name[0] = '.'; + oname->name[iname->len-1] = '.'; + oname->len = iname->len; + return oname->len; + } + } + if (ctx->has_valid_key) + return ext4_fname_decrypt(ctx, iname, oname); + else + return ext4_fname_hash(ctx, iname, oname); +} + +int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_dir_entry_2 *de, + struct ext4_str *oname) +{ + struct ext4_str iname = {.name = (unsigned char *) de->name, + .len = de->name_len }; + + return _ext4_fname_disk_to_usr(ctx, &iname, oname); +} + + +/** + * ext4_fname_usr_to_disk() - converts a filename from user space to disk space + */ +int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, + const struct qstr *iname, + struct ext4_str *oname) +{ + int res; + + if (ctx == NULL) + return -EIO; + if (iname->len < 3) { + /*Check for . and .. */ + if (iname->name[0] == '.' 
&& + iname->name[iname->len-1] == '.') { + oname->name[0] = '.'; + oname->name[iname->len-1] = '.'; + oname->len = iname->len; + return oname->len; + } + } + if (ctx->has_valid_key) { + res = ext4_fname_encrypt(ctx, iname, oname); + return res; + } + /* Without a proper key, a user is not allowed to modify the filenames + * in a directory. Consequently, a user space name cannot be mapped to + * a disk-space name */ + return -EACCES; +} + +/* + * Calculate the htree hash from a filename from user space + */ +int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, + const struct qstr *iname, + struct dx_hash_info *hinfo) +{ + struct ext4_str tmp, tmp2; + int ret = 0; + + if (!ctx || !ctx->has_valid_key || + ((iname->name[0] == '.') && + ((iname->len == 1) || + ((iname->name[1] == '.') && (iname->len == 2))))) { + ext4fs_dirhash(iname->name, iname->len, hinfo); + return 0; + } + + /* First encrypt the plaintext name */ + ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp); + if (ret < 0) + return ret; + + ret = ext4_fname_encrypt(ctx, iname, &tmp); + if (ret < 0) + goto out; + + tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; + tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL); + if (tmp2.name == NULL) { + ret = -ENOMEM; + goto out; + } + + ret = ext4_fname_hash(ctx, &tmp, &tmp2); + if (ret > 0) + ext4fs_dirhash(tmp2.name, tmp2.len, hinfo); + ext4_fname_crypto_free_buffer(&tmp2); +out: + ext4_fname_crypto_free_buffer(&tmp); + return ret; +} + +/** + * ext4_fname_disk_to_htree() - converts a filename from disk space to htree-access string + */ +int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_dir_entry_2 *de, + struct dx_hash_info *hinfo) +{ + struct ext4_str iname = {.name = (unsigned char *) de->name, + .len = de->name_len}; + struct ext4_str tmp; + int ret; + + if (!ctx || + ((iname.name[0] == '.') && + ((iname.len == 1) || + ((iname.name[1] == '.') && (iname.len == 2))))) { + ext4fs_dirhash(iname.name, iname.len, hinfo); + return 0; + } + + tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; + tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL); + if (tmp.name == NULL) + return -ENOMEM; + + ret = ext4_fname_hash(ctx, &iname, &tmp); + if (ret > 0) + ext4fs_dirhash(tmp.name, tmp.len, hinfo); + ext4_fname_crypto_free_buffer(&tmp); + return ret; +} diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c new file mode 100644 index 0000000..c8392af --- /dev/null +++ b/fs/ext4/crypto_key.c @@ -0,0 +1,165 @@ +/* + * linux/fs/ext4/crypto_key.c + * + * Copyright (C) 2015, Google, Inc. + * + * This contains encryption key functions for ext4 + * + * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015. + */ + +#include <keys/encrypted-type.h> +#include <keys/user-type.h> +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <uapi/linux/keyctl.h> + +#include "ext4.h" +#include "xattr.h" + +static void derive_crypt_complete(struct crypto_async_request *req, int rc) +{ + struct ext4_completion_result *ecr = req->data; + + if (rc == -EINPROGRESS) + return; + + ecr->res = rc; + complete(&ecr->completion); +} + +/** + * ext4_derive_key_aes() - Derive a key using AES-128-ECB + * @deriving_key: Encryption key used for derivatio. + * @source_key: Source key to which to apply derivation. + * @derived_key: Derived key. + * + * Return: Zero on success; non-zero otherwise. 
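In the htree hashing paths above, the scratch buffer that receives the hashed ciphertext name is sized (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1 bytes; with the 32-byte digest defined in this patch that works out to 45, the usual four-characters-per-three-bytes expansion plus one. A quick check of the arithmetic (constant copied from the patch, otherwise illustrative):

#include <assert.h>

#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32

int main(void)
{
        unsigned len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;

        /* ceil(32 / 3) * 4 == 44 encoded characters, plus one extra byte. */
        assert(len == 45);
        return 0;
}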
+ */ +static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE], + char source_key[EXT4_AES_256_XTS_KEY_SIZE], + char derived_key[EXT4_AES_256_XTS_KEY_SIZE]) +{ + int res = 0; + struct ablkcipher_request *req = NULL; + DECLARE_EXT4_COMPLETION_RESULT(ecr); + struct scatterlist src_sg, dst_sg; + struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, + 0); + + if (IS_ERR(tfm)) { + res = PTR_ERR(tfm); + tfm = NULL; + goto out; + } + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); + req = ablkcipher_request_alloc(tfm, GFP_NOFS); + if (!req) { + res = -ENOMEM; + goto out; + } + ablkcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + derive_crypt_complete, &ecr); + res = crypto_ablkcipher_setkey(tfm, deriving_key, + EXT4_AES_128_ECB_KEY_SIZE); + if (res < 0) + goto out; + sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE); + sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE); + ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, + EXT4_AES_256_XTS_KEY_SIZE, NULL); + res = crypto_ablkcipher_encrypt(req); + if (res == -EINPROGRESS || res == -EBUSY) { + BUG_ON(req->base.data != &ecr); + wait_for_completion(&ecr.completion); + res = ecr.res; + } + +out: + if (req) + ablkcipher_request_free(req); + if (tfm) + crypto_free_ablkcipher(tfm); + return res; +} + +/** + * ext4_generate_encryption_key() - generates an encryption key + * @inode: The inode to generate the encryption key for. + */ +int ext4_generate_encryption_key(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_encryption_key *crypt_key = &ei->i_encryption_key; + char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE + + (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1]; + struct key *keyring_key = NULL; + struct ext4_encryption_key *master_key; + struct ext4_encryption_context ctx; + struct user_key_payload *ukp; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + &ctx, sizeof(ctx)); + + if (res != sizeof(ctx)) { + if (res > 0) + res = -EINVAL; + goto out; + } + res = 0; + + if (S_ISREG(inode->i_mode)) + crypt_key->mode = ctx.contents_encryption_mode; + else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + crypt_key->mode = ctx.filenames_encryption_mode; + else { + printk(KERN_ERR "ext4 crypto: Unsupported inode type.\n"); + BUG(); + } + crypt_key->size = ext4_encryption_key_size(crypt_key->mode); + BUG_ON(!crypt_key->size); + if (DUMMY_ENCRYPTION_ENABLED(sbi)) { + memset(crypt_key->raw, 0x42, EXT4_AES_256_XTS_KEY_SIZE); + goto out; + } + memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX, + EXT4_KEY_DESC_PREFIX_SIZE); + sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE, + "%*phN", EXT4_KEY_DESCRIPTOR_SIZE, + ctx.master_key_descriptor); + full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE + + (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0'; + keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); + if (IS_ERR(keyring_key)) { + res = PTR_ERR(keyring_key); + keyring_key = NULL; + goto out; + } + BUG_ON(keyring_key->type != &key_type_logon); + ukp = ((struct user_key_payload *)keyring_key->payload.data); + if (ukp->datalen != sizeof(struct ext4_encryption_key)) { + res = -EINVAL; + goto out; + } + master_key = (struct ext4_encryption_key *)ukp->data; + BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE != + EXT4_KEY_DERIVATION_NONCE_SIZE); + BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE); + res = 
ext4_derive_key_aes(ctx.nonce, master_key->raw, crypt_key->raw); +out: + if (keyring_key) + key_put(keyring_key); + if (res < 0) + crypt_key->mode = EXT4_ENCRYPTION_MODE_INVALID; + return res; +} + +int ext4_has_encryption_key(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_encryption_key *crypt_key = &ei->i_encryption_key; + + return (crypt_key->mode != EXT4_ENCRYPTION_MODE_INVALID); +} diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c new file mode 100644 index 0000000..30eaf9e --- /dev/null +++ b/fs/ext4/crypto_policy.c @@ -0,0 +1,194 @@ +/* + * linux/fs/ext4/crypto_policy.c + * + * Copyright (C) 2015, Google, Inc. + * + * This contains encryption policy functions for ext4 + * + * Written by Michael Halcrow, 2015. + */ + +#include <linux/random.h> +#include <linux/string.h> +#include <linux/types.h> + +#include "ext4.h" +#include "xattr.h" + +static int ext4_inode_has_encryption_context(struct inode *inode) +{ + int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0); + return (res > 0); +} + +/* + * check whether the policy is consistent with the encryption context + * for the inode + */ +static int ext4_is_encryption_context_consistent_with_policy( + struct inode *inode, const struct ext4_encryption_policy *policy) +{ + struct ext4_encryption_context ctx; + int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, + sizeof(ctx)); + if (res != sizeof(ctx)) + return 0; + return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, + EXT4_KEY_DESCRIPTOR_SIZE) == 0 && + (ctx.contents_encryption_mode == + policy->contents_encryption_mode) && + (ctx.filenames_encryption_mode == + policy->filenames_encryption_mode)); +} + +static int ext4_create_encryption_context_from_policy( + struct inode *inode, const struct ext4_encryption_policy *policy) +{ + struct ext4_encryption_context ctx; + int res = 0; + + ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; + memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, + EXT4_KEY_DESCRIPTOR_SIZE); + if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) { + printk(KERN_WARNING + "%s: Invalid contents encryption mode %d\n", __func__, + policy->contents_encryption_mode); + res = -EINVAL; + goto out; + } + if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { + printk(KERN_WARNING + "%s: Invalid filenames encryption mode %d\n", __func__, + policy->filenames_encryption_mode); + res = -EINVAL; + goto out; + } + ctx.contents_encryption_mode = policy->contents_encryption_mode; + ctx.filenames_encryption_mode = policy->filenames_encryption_mode; + BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); + get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); + + res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, + sizeof(ctx), 0); +out: + if (!res) + ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); + return res; +} + +int ext4_process_policy(const struct ext4_encryption_policy *policy, + struct inode *inode) +{ + if (policy->version != 0) + return -EINVAL; + + if (!ext4_inode_has_encryption_context(inode)) { + if (!ext4_empty_dir(inode)) + return -ENOTEMPTY; + return ext4_create_encryption_context_from_policy(inode, + policy); + } + + if (ext4_is_encryption_context_consistent_with_policy(inode, policy)) + return 0; + + printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n", + 
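ext4_generate_encryption_key() above looks up a "logon" kernel key whose description is "ext4:" followed by the 16 hex characters of the 8-byte master key descriptor from the inode's encryption context, insists the payload is exactly a struct ext4_encryption_key, and then uses the context's 16-byte nonce as an AES-128-ECB key to derive the per-inode key from the 64-byte master key via ext4_derive_key_aes(). A minimal sketch of the user-space side, loading such a key into the session keyring with libkeyutils; the struct layout mirrors ext4_crypto.h from this patch, while the descriptor and key bytes are placeholders (this is illustrative, not the e2fsprogs tooling):

#include <keyutils.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXT4_MAX_KEY_SIZE 64
#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1

struct ext4_encryption_key {
        uint32_t mode;
        char raw[EXT4_MAX_KEY_SIZE];
        uint32_t size;
};

int main(void)
{
        struct ext4_encryption_key key = {
                .mode = EXT4_ENCRYPTION_MODE_AES_256_XTS,
                .size = EXT4_MAX_KEY_SIZE,
        };
        /* "ext4:" + 16 hex chars of the descriptor stored in the context xattr. */
        const char *desc = "ext4:0123456789abcdef";
        key_serial_t id;

        memset(key.raw, 0x42, sizeof(key.raw));  /* placeholder key material */
        id = add_key("logon", desc, &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
        if (id < 0) {
                perror("add_key");
                return 1;
        }
        printf("added key %d\n", (int)id);
        return 0;
}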
__func__); + return -EINVAL; +} + +int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy) +{ + struct ext4_encryption_context ctx; + + int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + &ctx, sizeof(ctx)); + if (res != sizeof(ctx)) + return -ENOENT; + if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1) + return -EINVAL; + policy->version = 0; + policy->contents_encryption_mode = ctx.contents_encryption_mode; + policy->filenames_encryption_mode = ctx.filenames_encryption_mode; + memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, + EXT4_KEY_DESCRIPTOR_SIZE); + return 0; +} + +int ext4_is_child_context_consistent_with_parent(struct inode *parent, + struct inode *child) +{ + struct ext4_encryption_context parent_ctx, child_ctx; + int res; + + if ((parent == NULL) || (child == NULL)) { + pr_err("parent %p child %p\n", parent, child); + BUG_ON(1); + } + /* no restrictions if the parent directory is not encrypted */ + if (!ext4_encrypted_inode(parent)) + return 1; + res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + &parent_ctx, sizeof(parent_ctx)); + if (res != sizeof(parent_ctx)) + return 0; + /* if the child directory is not encrypted, this is always a problem */ + if (!ext4_encrypted_inode(child)) + return 0; + res = ext4_xattr_get(child, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + &child_ctx, sizeof(child_ctx)); + if (res != sizeof(child_ctx)) + return 0; + return (memcmp(parent_ctx.master_key_descriptor, + child_ctx.master_key_descriptor, + EXT4_KEY_DESCRIPTOR_SIZE) == 0 && + (parent_ctx.contents_encryption_mode == + child_ctx.contents_encryption_mode) && + (parent_ctx.filenames_encryption_mode == + child_ctx.filenames_encryption_mode)); +} + +/** + * ext4_inherit_context() - Sets a child context from its parent + * @parent: Parent inode from which the context is inherited. + * @child: Child inode that inherits the context from @parent. 
+ * + * Return: Zero on success, non-zero otherwise + */ +int ext4_inherit_context(struct inode *parent, struct inode *child) +{ + struct ext4_encryption_context ctx; + int res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, + &ctx, sizeof(ctx)); + + if (res != sizeof(ctx)) { + if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) { + ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; + ctx.contents_encryption_mode = + EXT4_ENCRYPTION_MODE_AES_256_XTS; + ctx.filenames_encryption_mode = + EXT4_ENCRYPTION_MODE_AES_256_CTS; + memset(ctx.master_key_descriptor, 0x42, + EXT4_KEY_DESCRIPTOR_SIZE); + res = 0; + } else { + goto out; + } + } + get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); + res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION, + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, + sizeof(ctx), 0); +out: + if (!res) + ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT); + return res; +} diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index c24143e..61db51a5 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -22,10 +22,8 @@ */ #include <linux/fs.h> -#include <linux/jbd2.h> #include <linux/buffer_head.h> #include <linux/slab.h> -#include <linux/rbtree.h> #include "ext4.h" #include "xattr.h" @@ -110,7 +108,10 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) int err; struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; + struct buffer_head *bh = NULL; int dir_has_error = 0; + struct ext4_fname_crypto_ctx *enc_ctx = NULL; + struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; if (is_dx_dir(inode)) { err = ext4_dx_readdir(file, ctx); @@ -127,17 +128,28 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) if (ext4_has_inline_data(inode)) { int has_inline_data = 1; - int ret = ext4_read_inline_dir(file, ctx, + err = ext4_read_inline_dir(file, ctx, &has_inline_data); if (has_inline_data) - return ret; + return err; + } + + enc_ctx = ext4_get_fname_crypto_ctx(inode, EXT4_NAME_LEN); + if (IS_ERR(enc_ctx)) + return PTR_ERR(enc_ctx); + if (enc_ctx) { + err = ext4_fname_crypto_alloc_buffer(enc_ctx, EXT4_NAME_LEN, + &fname_crypto_str); + if (err < 0) { + ext4_put_fname_crypto_ctx(&enc_ctx); + return err; + } } offset = ctx->pos & (sb->s_blocksize - 1); while (ctx->pos < inode->i_size) { struct ext4_map_blocks map; - struct buffer_head *bh = NULL; map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb); map.m_len = 1; @@ -180,6 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) (unsigned long long)ctx->pos); ctx->pos += sb->s_blocksize - offset; brelse(bh); + bh = NULL; continue; } set_buffer_verified(bh); @@ -226,25 +239,44 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); if (le32_to_cpu(de->inode)) { - if (!dir_emit(ctx, de->name, - de->name_len, - le32_to_cpu(de->inode), - get_dtype(sb, de->file_type))) { - brelse(bh); - return 0; + if (enc_ctx == NULL) { + /* Directory is not encrypted */ + if (!dir_emit(ctx, de->name, + de->name_len, + le32_to_cpu(de->inode), + get_dtype(sb, de->file_type))) + goto done; + } else { + /* Directory is encrypted */ + err = ext4_fname_disk_to_usr(enc_ctx, + de, &fname_crypto_str); + if (err < 0) + goto errout; + if (!dir_emit(ctx, + fname_crypto_str.name, err, + le32_to_cpu(de->inode), + get_dtype(sb, de->file_type))) + goto done; } } ctx->pos += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); } - offset = 0; + if ((ctx->pos < inode->i_size) && 
!dir_relax(inode)) + goto done; brelse(bh); - if (ctx->pos < inode->i_size) { - if (!dir_relax(inode)) - return 0; - } + bh = NULL; + offset = 0; } - return 0; +done: + err = 0; +errout: +#ifdef CONFIG_EXT4_FS_ENCRYPTION + ext4_put_fname_crypto_ctx(&enc_ctx); + ext4_fname_crypto_free_buffer(&fname_crypto_str); +#endif + brelse(bh); + return err; } static inline int is_32bit_api(void) @@ -384,10 +416,15 @@ void ext4_htree_free_dir_info(struct dir_private_info *p) /* * Given a directory entry, enter it into the fname rb tree. + * + * When filename encryption is enabled, the dirent will hold the + * encrypted filename, while the htree will hold decrypted filename. + * The decrypted filename is passed in via ent_name. parameter. */ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, __u32 minor_hash, - struct ext4_dir_entry_2 *dirent) + struct ext4_dir_entry_2 *dirent, + struct ext4_str *ent_name) { struct rb_node **p, *parent = NULL; struct fname *fname, *new_fn; @@ -398,17 +435,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, p = &info->root.rb_node; /* Create and allocate the fname structure */ - len = sizeof(struct fname) + dirent->name_len + 1; + len = sizeof(struct fname) + ent_name->len + 1; new_fn = kzalloc(len, GFP_KERNEL); if (!new_fn) return -ENOMEM; new_fn->hash = hash; new_fn->minor_hash = minor_hash; new_fn->inode = le32_to_cpu(dirent->inode); - new_fn->name_len = dirent->name_len; + new_fn->name_len = ent_name->len; new_fn->file_type = dirent->file_type; - memcpy(new_fn->name, dirent->name, dirent->name_len); - new_fn->name[dirent->name_len] = 0; + memcpy(new_fn->name, ent_name->name, ent_name->len); + new_fn->name[ent_name->len] = 0; while (*p) { parent = *p; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index f63c3d5..ef267ad 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -422,7 +422,7 @@ enum { EXT4_INODE_DIRTY = 8, EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */ EXT4_INODE_NOCOMPR = 10, /* Don't compress */ - EXT4_INODE_ENCRYPT = 11, /* Compression error */ + EXT4_INODE_ENCRYPT = 11, /* Encrypted file */ /* End compression flags --- maybe not all used */ EXT4_INODE_INDEX = 12, /* hash-indexed directory */ EXT4_INODE_IMAGIC = 13, /* AFS directory */ @@ -582,6 +582,15 @@ enum { #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020 +/* Encryption algorithms */ +#define EXT4_ENCRYPTION_MODE_INVALID 0 +#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1 +#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2 +#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3 +#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4 + +#include "ext4_crypto.h" + /* * ioctl commands */ @@ -603,6 +612,9 @@ enum { #define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64) #define EXT4_IOC_SWAP_BOOT _IO('f', 17) #define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18) +#define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy) +#define EXT4_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) +#define EXT4_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct ext4_encryption_policy) #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* @@ -939,6 +951,11 @@ struct ext4_inode_info { /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ __u32 i_csum_seed; + +#ifdef CONFIG_EXT4_FS_ENCRYPTION + /* Encryption params */ + struct ext4_encryption_key i_encryption_key; +#endif }; /* @@ -1142,7 +1159,8 @@ struct ext4_super_block { __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ __u8 s_log_groups_per_flex; /* FLEX_BG group 
size */ __u8 s_checksum_type; /* metadata checksum algorithm used */ - __le16 s_reserved_pad; + __u8 s_encryption_level; /* versioning level for encryption */ + __u8 s_reserved_pad; /* Padding to next 32bits */ __le64 s_kbytes_written; /* nr of lifetime kilobytes written */ __le32 s_snapshot_inum; /* Inode number of active snapshot */ __le32 s_snapshot_id; /* sequential ID of active snapshot */ @@ -1169,7 +1187,9 @@ struct ext4_super_block { __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */ __u8 s_encrypt_algos[4]; /* Encryption algorithms in use */ - __le32 s_reserved[105]; /* Padding to the end of the block */ + __u8 s_encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ + __le32 s_lpf_ino; /* Location of the lost+found inode */ + __le32 s_reserved[100]; /* Padding to the end of the block */ __le32 s_checksum; /* crc32c(superblock) */ }; @@ -1180,8 +1200,16 @@ struct ext4_super_block { /* * run-time mount flags */ -#define EXT4_MF_MNTDIR_SAMPLED 0x0001 -#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ +#define EXT4_MF_MNTDIR_SAMPLED 0x0001 +#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ +#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004 + +#ifdef CONFIG_EXT4_FS_ENCRYPTION +#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \ + EXT4_MF_TEST_DUMMY_ENCRYPTION)) +#else +#define DUMMY_ENCRYPTION_ENABLED(sbi) (0) +#endif /* Number of quota types we support */ #define EXT4_MAXQUOTAS 2 @@ -1351,6 +1379,12 @@ struct ext4_sb_info { struct ratelimit_state s_err_ratelimit_state; struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; + +#ifdef CONFIG_EXT4_FS_ENCRYPTION + /* Encryption */ + uint32_t s_file_encryption_mode; + uint32_t s_dir_encryption_mode; +#endif }; static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) @@ -1466,6 +1500,18 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) #define EXT4_SB(sb) (sb) #endif +/* + * Returns true if the inode is inode is encrypted + */ +static inline int ext4_encrypted_inode(struct inode *inode) +{ +#ifdef CONFIG_EXT4_FS_ENCRYPTION + return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT); +#else + return 0; +#endif +} + #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime /* @@ -1575,8 +1621,9 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) EXT4_FEATURE_INCOMPAT_EXTENTS| \ EXT4_FEATURE_INCOMPAT_64BIT| \ EXT4_FEATURE_INCOMPAT_FLEX_BG| \ - EXT4_FEATURE_INCOMPAT_MMP | \ - EXT4_FEATURE_INCOMPAT_INLINE_DATA) + EXT4_FEATURE_INCOMPAT_MMP | \ + EXT4_FEATURE_INCOMPAT_INLINE_DATA | \ + EXT4_FEATURE_INCOMPAT_ENCRYPT) #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ @@ -2001,6 +2048,99 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb, struct ext4_group_desc *gdp); ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); +/* crypto_policy.c */ +int ext4_is_child_context_consistent_with_parent(struct inode *parent, + struct inode *child); +int ext4_inherit_context(struct inode *parent, struct inode *child); +void ext4_to_hex(char *dst, char *src, size_t src_size); +int ext4_process_policy(const struct ext4_encryption_policy *policy, + struct inode *inode); +int ext4_get_policy(struct inode *inode, + struct ext4_encryption_policy *policy); + +/* crypto.c */ +bool ext4_valid_contents_enc_mode(uint32_t mode); +uint32_t 
ext4_validate_encryption_key_size(uint32_t mode, uint32_t size); +extern struct workqueue_struct *ext4_read_workqueue; +struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode); +void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx); +void ext4_restore_control_page(struct page *data_page); +struct page *ext4_encrypt(struct inode *inode, + struct page *plaintext_page); +int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page); +int ext4_decrypt_one(struct inode *inode, struct page *page); +int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex); + +#ifdef CONFIG_EXT4_FS_ENCRYPTION +int ext4_init_crypto(void); +void ext4_exit_crypto(void); +static inline int ext4_sb_has_crypto(struct super_block *sb) +{ + return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT); +} +#else +static inline int ext4_init_crypto(void) { return 0; } +static inline void ext4_exit_crypto(void) { } +static inline int ext4_sb_has_crypto(struct super_block *sb) +{ + return 0; +} +#endif + +/* crypto_fname.c */ +bool ext4_valid_filenames_enc_mode(uint32_t mode); +u32 ext4_fname_crypto_round_up(u32 size, u32 blksize); +int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, + u32 ilen, struct ext4_str *crypto_str); +int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_str *iname, + struct ext4_str *oname); +int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_dir_entry_2 *de, + struct ext4_str *oname); +int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, + const struct qstr *iname, + struct ext4_str *oname); +int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, + const struct qstr *iname, + struct dx_hash_info *hinfo); +int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx, + const struct ext4_dir_entry_2 *de, + struct dx_hash_info *hinfo); +int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, + u32 namelen); + +#ifdef CONFIG_EXT4_FS_ENCRYPTION +void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx); +struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode, + u32 max_len); +void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str); +#else +static inline +void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx) { } +static inline +struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode, + u32 max_len) +{ + return NULL; +} +static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { } +#endif + + +/* crypto_key.c */ +int ext4_generate_encryption_key(struct inode *inode); + +#ifdef CONFIG_EXT4_FS_ENCRYPTION +int ext4_has_encryption_key(struct inode *inode); +#else +static inline int ext4_has_encryption_key(struct inode *inode) +{ + return 0; +} +#endif + + /* dir.c */ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, struct file *, @@ -2011,17 +2151,20 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \ (de), (bh), (buf), (size), (offset))) extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, - __u32 minor_hash, - struct ext4_dir_entry_2 *dirent); + __u32 minor_hash, + struct ext4_dir_entry_2 *dirent, + struct ext4_str *ent_name); extern void ext4_htree_free_dir_info(struct dir_private_info *p); extern int ext4_find_dest_de(struct inode *dir, struct inode *inode, struct buffer_head *bh, void *buf, int buf_size, const char *name, int namelen, 
struct ext4_dir_entry_2 **dest_de); -void ext4_insert_dentry(struct inode *inode, +int ext4_insert_dentry(struct inode *dir, + struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, + const struct qstr *iname, const char *name, int namelen); static inline void ext4_update_dx_flag(struct inode *inode) { @@ -2099,6 +2242,7 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); /* inode.c */ +int ext4_inode_is_fast_symlink(struct inode *inode); struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int); struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int); int ext4_get_block_write(struct inode *inode, sector_t iblock, @@ -2152,8 +2296,8 @@ extern void ext4_da_update_reserve_space(struct inode *inode, /* indirect.c */ extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); -extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset); +extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset); extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks); extern void ext4_ind_truncate(handle_t *, struct inode *inode); @@ -2189,6 +2333,7 @@ extern int ext4_generic_delete_entry(handle_t *handle, void *entry_buf, int buf_size, int csum_size); +extern int ext4_empty_dir(struct inode *inode); /* resize.c */ extern int ext4_group_add(struct super_block *sb, @@ -2593,7 +2738,6 @@ extern const struct file_operations ext4_dir_operations; /* file.c */ extern const struct inode_operations ext4_file_inode_operations; extern const struct file_operations ext4_file_operations; -extern const struct file_operations ext4_dax_file_operations; extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin); /* inline.c */ @@ -2699,6 +2843,10 @@ static inline void ext4_set_de_type(struct super_block *sb, de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; } +/* readpages.c */ +extern int ext4_mpage_readpages(struct address_space *mapping, + struct list_head *pages, struct page *page, + unsigned nr_pages); /* symlink.c */ extern const struct inode_operations ext4_symlink_inode_operations; diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h new file mode 100644 index 0000000..c2ba35a --- /dev/null +++ b/fs/ext4/ext4_crypto.h @@ -0,0 +1,147 @@ +/* + * linux/fs/ext4/ext4_crypto.h + * + * Copyright (C) 2015, Google, Inc. + * + * This contains encryption header content for ext4 + * + * Written by Michael Halcrow, 2015. 
+ */ + +#ifndef _EXT4_CRYPTO_H +#define _EXT4_CRYPTO_H + +#include <linux/fs.h> + +#define EXT4_KEY_DESCRIPTOR_SIZE 8 + +/* Policy provided via an ioctl on the topmost directory */ +struct ext4_encryption_policy { + char version; + char contents_encryption_mode; + char filenames_encryption_mode; + char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; +} __attribute__((__packed__)); + +#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1 +#define EXT4_KEY_DERIVATION_NONCE_SIZE 16 + +/** + * Encryption context for inode + * + * Protector format: + * 1 byte: Protector format (1 = this version) + * 1 byte: File contents encryption mode + * 1 byte: File names encryption mode + * 1 byte: Reserved + * 8 bytes: Master Key descriptor + * 16 bytes: Encryption Key derivation nonce + */ +struct ext4_encryption_context { + char format; + char contents_encryption_mode; + char filenames_encryption_mode; + char reserved; + char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; + char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; +} __attribute__((__packed__)); + +/* Encryption parameters */ +#define EXT4_XTS_TWEAK_SIZE 16 +#define EXT4_AES_128_ECB_KEY_SIZE 16 +#define EXT4_AES_256_GCM_KEY_SIZE 32 +#define EXT4_AES_256_CBC_KEY_SIZE 32 +#define EXT4_AES_256_CTS_KEY_SIZE 32 +#define EXT4_AES_256_XTS_KEY_SIZE 64 +#define EXT4_MAX_KEY_SIZE 64 + +#define EXT4_KEY_DESC_PREFIX "ext4:" +#define EXT4_KEY_DESC_PREFIX_SIZE 5 + +struct ext4_encryption_key { + uint32_t mode; + char raw[EXT4_MAX_KEY_SIZE]; + uint32_t size; +}; + +#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 +#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002 + +struct ext4_crypto_ctx { + struct crypto_tfm *tfm; /* Crypto API context */ + struct page *bounce_page; /* Ciphertext page on write path */ + struct page *control_page; /* Original page on write path */ + struct bio *bio; /* The bio for this context */ + struct work_struct work; /* Work queue for read complete path */ + struct list_head free_list; /* Free list */ + int flags; /* Flags */ + int mode; /* Encryption mode for tfm */ +}; + +struct ext4_completion_result { + struct completion completion; + int res; +}; + +#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \ + struct ext4_completion_result ecr = { \ + COMPLETION_INITIALIZER((ecr).completion), 0 } + +static inline int ext4_encryption_key_size(int mode) +{ + switch (mode) { + case EXT4_ENCRYPTION_MODE_AES_256_XTS: + return EXT4_AES_256_XTS_KEY_SIZE; + case EXT4_ENCRYPTION_MODE_AES_256_GCM: + return EXT4_AES_256_GCM_KEY_SIZE; + case EXT4_ENCRYPTION_MODE_AES_256_CBC: + return EXT4_AES_256_CBC_KEY_SIZE; + case EXT4_ENCRYPTION_MODE_AES_256_CTS: + return EXT4_AES_256_CTS_KEY_SIZE; + default: + BUG(); + } + return 0; +} + +#define EXT4_FNAME_NUM_SCATTER_ENTRIES 4 +#define EXT4_CRYPTO_BLOCK_SIZE 16 +#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32 + +struct ext4_str { + unsigned char *name; + u32 len; +}; + +struct ext4_fname_crypto_ctx { + u32 lim; + char tmp_buf[EXT4_CRYPTO_BLOCK_SIZE]; + struct crypto_ablkcipher *ctfm; + struct crypto_hash *htfm; + struct page *workpage; + struct ext4_encryption_key key; + unsigned has_valid_key : 1; + unsigned ctfm_key_is_ready : 1; +}; + +/** + * For encrypted symlinks, the ciphertext length is stored at the beginning + * of the string in little-endian format. + */ +struct ext4_encrypted_symlink_data { + __le16 len; + char encrypted_path[1]; +} __attribute__((__packed__)); + +/** + * This function is used to calculate the disk space required to + * store a filename of length l in encrypted symlink format. 
+ */ +static inline u32 encrypted_symlink_data_len(u32 l) +{ + if (l < EXT4_CRYPTO_BLOCK_SIZE) + l = EXT4_CRYPTO_BLOCK_SIZE; + return (l + sizeof(struct ext4_encrypted_symlink_data) - 1); +} + +#endif /* _EXT4_CRYPTO_H */ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index bed4308..973816b 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1717,12 +1717,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, { unsigned short ext1_ee_len, ext2_ee_len; - /* - * Make sure that both extents are initialized. We don't merge - * unwritten extents so that we can be sure that end_io code has - * the extent that was written properly split out and conversion to - * initialized is trivial. - */ if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) return 0; @@ -3128,6 +3122,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) ee_len = ext4_ext_get_actual_len(ex); ee_pblock = ext4_ext_pblock(ex); + if (ext4_encrypted_inode(inode)) + return ext4_encrypted_zeroout(inode, ex); + ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); if (ret > 0) ret = 0; @@ -4535,19 +4532,7 @@ got_allocated_blocks: */ reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, allocated); - if (map_from_cluster) { - if (reserved_clusters) { - /* - * We have clusters reserved for this range. - * But since we are not doing actual allocation - * and are simply using blocks from previously - * allocated cluster, we should release the - * reservation and not claim quota. - */ - ext4_da_update_reserve_space(inode, - reserved_clusters, 0); - } - } else { + if (!map_from_cluster) { BUG_ON(allocated_clusters < reserved_clusters); if (reserved_clusters < allocated_clusters) { struct ext4_inode_info *ei = EXT4_I(inode); @@ -4803,12 +4788,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, else max_blocks -= lblk; - flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT | - EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | - EXT4_EX_NOCACHE; - if (mode & FALLOC_FL_KEEP_SIZE) - flags |= EXT4_GET_BLOCKS_KEEP_SIZE; - mutex_lock(&inode->i_mutex); /* @@ -4825,15 +4804,28 @@ static long ext4_zero_range(struct file *file, loff_t offset, ret = inode_newsize_ok(inode, new_size); if (ret) goto out_mutex; - /* - * If we have a partial block after EOF we have to allocate - * the entire block. - */ - if (partial_end) - max_blocks += 1; } + flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; + if (mode & FALLOC_FL_KEEP_SIZE) + flags |= EXT4_GET_BLOCKS_KEEP_SIZE; + + /* Preallocate the range including the unaligned edges */ + if (partial_begin || partial_end) { + ret = ext4_alloc_file_blocks(file, + round_down(offset, 1 << blkbits) >> blkbits, + (round_up((offset + len), 1 << blkbits) - + round_down(offset, 1 << blkbits)) >> blkbits, + new_size, flags, mode); + if (ret) + goto out_mutex; + + } + + /* Zero range excluding the unaligned edges */ if (max_blocks > 0) { + flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | + EXT4_EX_NOCACHE); /* Now release the pages and zero block aligned part of pages*/ truncate_pagecache_range(inode, start, end - 1); @@ -4847,19 +4839,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, flags, mode); if (ret) goto out_dio; - /* - * Remove entire range from the extent status tree. - * - * ext4_es_remove_extent(inode, lblk, max_blocks) is - * NOT sufficient. I'm not sure why this is the case, - * but let's be conservative and remove the extent - * status tree for the entire inode. 
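Two of the on-disk sizes defined in ext4_crypto.h above are easy to sanity-check: the packed ext4_encryption_context xattr is 1 + 1 + 1 + 1 + 8 + 16 = 28 bytes, and an encrypted symlink stores a 2-byte little-endian ciphertext length followed by the ciphertext, which is padded to at least one 16-byte crypto block. A small sketch of the arithmetic, with the struct definitions copied from the header (illustrative only; __le16 is shown as uint16_t):

#include <assert.h>
#include <stdint.h>

#define EXT4_KEY_DESCRIPTOR_SIZE 8
#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
#define EXT4_CRYPTO_BLOCK_SIZE 16

struct ext4_encryption_context {
        char format;
        char contents_encryption_mode;
        char filenames_encryption_mode;
        char reserved;
        char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
        char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
} __attribute__((__packed__));

struct ext4_encrypted_symlink_data {
        uint16_t len;                 /* __le16 in the kernel header */
        char encrypted_path[1];
} __attribute__((__packed__));

static uint32_t encrypted_symlink_data_len(uint32_t l)
{
        if (l < EXT4_CRYPTO_BLOCK_SIZE)
                l = EXT4_CRYPTO_BLOCK_SIZE;
        return l + sizeof(struct ext4_encrypted_symlink_data) - 1;
}

int main(void)
{
        assert(sizeof(struct ext4_encryption_context) == 28);
        /* A 10-byte target pads to one crypto block plus the 2-byte length. */
        assert(encrypted_symlink_data_len(10) == 18);
        return 0;
}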
There should be - * no outstanding delalloc extents thanks to the - * filemap_write_and_wait_range() call above. - */ - ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); - if (ret) - goto out_dio; } if (!partial_begin && !partial_end) goto out_dio; @@ -4922,6 +4901,20 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) ext4_lblk_t lblk; unsigned int blkbits = inode->i_blkbits; + /* + * Encrypted inodes can't handle collapse range or insert + * range since we would need to re-encrypt blocks with a + * different IV or XTS tweak (which are based on the logical + * block number). + * + * XXX It's not clear why zero range isn't working, but we'll + * leave it disabled for encrypted inodes for now. This is a + * bug we should fix.... + */ + if (ext4_encrypted_inode(inode) && + (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))) + return -EOPNOTSUPP; + /* Return error if mode is not supported */ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index e04d457..d33d5a68 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -9,12 +9,10 @@ * * Ext4 extents status tree core functions. */ -#include <linux/rbtree.h> #include <linux/list_sort.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "ext4.h" -#include "extents_status.h" #include <trace/events/ext4.h> diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 33a09da..0613c25 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -20,12 +20,11 @@ #include <linux/time.h> #include <linux/fs.h> -#include <linux/jbd2.h> #include <linux/mount.h> #include <linux/path.h> -#include <linux/aio.h> #include <linux/quotaops.h> #include <linux/pagevec.h> +#include <linux/uio.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" @@ -95,11 +94,9 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(iocb->ki_filp); struct mutex *aio_mutex = NULL; struct blk_plug plug; - int o_direct = io_is_direct(file); + int o_direct = iocb->ki_flags & IOCB_DIRECT; int overwrite = 0; - size_t length = iov_iter_count(from); ssize_t ret; - loff_t pos = iocb->ki_pos; /* * Unaligned direct AIO must be serialized; see comment above @@ -108,16 +105,17 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) && !is_sync_kiocb(iocb) && - (file->f_flags & O_APPEND || - ext4_unaligned_aio(inode, from, pos))) { + (iocb->ki_flags & IOCB_APPEND || + ext4_unaligned_aio(inode, from, iocb->ki_pos))) { aio_mutex = ext4_aio_mutex(inode); mutex_lock(aio_mutex); ext4_unwritten_wait(inode); } mutex_lock(&inode->i_mutex); - if (file->f_flags & O_APPEND) - iocb->ki_pos = pos = i_size_read(inode); + ret = generic_write_checks(iocb, from); + if (ret <= 0) + goto out; /* * If we have encountered a bitmap-format file, the size limit @@ -126,22 +124,19 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - if ((pos > sbi->s_bitmap_maxbytes) || - (pos == sbi->s_bitmap_maxbytes && length > 0)) { - mutex_unlock(&inode->i_mutex); + if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) { ret = -EFBIG; - goto errout; + goto out; } - - if (pos + length > sbi->s_bitmap_maxbytes) - iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos); + iov_iter_truncate(from, sbi->s_bitmap_maxbytes - 
iocb->ki_pos); } iocb->private = &overwrite; if (o_direct) { + size_t length = iov_iter_count(from); + loff_t pos = iocb->ki_pos; blk_start_plug(&plug); - /* check whether we do a DIO overwrite or not */ if (ext4_should_dioread_nolock(inode) && !aio_mutex && !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) { @@ -185,7 +180,12 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (o_direct) blk_finish_plug(&plug); -errout: + if (aio_mutex) + mutex_unlock(aio_mutex); + return ret; + +out: + mutex_unlock(&inode->i_mutex); if (aio_mutex) mutex_unlock(aio_mutex); return ret; @@ -206,6 +206,7 @@ static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) static const struct vm_operations_struct ext4_dax_vm_ops = { .fault = ext4_dax_fault, .page_mkwrite = ext4_dax_mkwrite, + .pfn_mkwrite = dax_pfn_mkwrite, }; #else #define ext4_dax_vm_ops ext4_file_vm_ops @@ -219,6 +220,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = { static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) { + struct inode *inode = file->f_mapping->host; + + if (ext4_encrypted_inode(inode)) { + int err = ext4_generate_encryption_key(inode); + if (err) + return 0; + } file_accessed(file); if (IS_DAX(file_inode(file))) { vma->vm_ops = &ext4_dax_vm_ops; @@ -236,6 +244,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp) struct vfsmount *mnt = filp->f_path.mnt; struct path path; char buf[64], *cp; + int ret; if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && !(sb->s_flags & MS_RDONLY))) { @@ -274,11 +283,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp) * writing and the journal is present */ if (filp->f_mode & FMODE_WRITE) { - int ret = ext4_inode_attach_jinode(inode); + ret = ext4_inode_attach_jinode(inode); if (ret < 0) return ret; } - return dquot_file_open(inode, filp); + ret = dquot_file_open(inode, filp); + if (!ret && ext4_encrypted_inode(inode)) { + ret = ext4_generate_encryption_key(inode); + if (ret) + ret = -EACCES; + } + return ret; } /* @@ -607,8 +622,6 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence) const struct file_operations ext4_file_operations = { .llseek = ext4_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = ext4_file_write_iter, .unlocked_ioctl = ext4_ioctl, @@ -624,26 +637,6 @@ const struct file_operations ext4_file_operations = { .fallocate = ext4_fallocate, }; -#ifdef CONFIG_FS_DAX -const struct file_operations ext4_dax_file_operations = { - .llseek = ext4_llseek, - .read = new_sync_read, - .write = new_sync_write, - .read_iter = generic_file_read_iter, - .write_iter = ext4_file_write_iter, - .unlocked_ioctl = ext4_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = ext4_compat_ioctl, -#endif - .mmap = ext4_file_mmap, - .open = ext4_file_open, - .release = ext4_release_file, - .fsync = ext4_sync_file, - /* Splice not yet supported with DAX */ - .fallocate = ext4_fallocate, -}; -#endif - const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_getattr, diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index a8bc47f..8850254 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -26,7 +26,6 @@ #include <linux/fs.h> #include <linux/sched.h> #include <linux/writeback.h> -#include <linux/jbd2.h> #include <linux/blkdev.h> #include "ext4.h" @@ -56,7 +55,7 @@ static int ext4_sync_parent(struct inode *inode) dentry = d_find_any_alias(inode); if (!dentry) break; 
- next = igrab(dentry->d_parent->d_inode); + next = igrab(d_inode(dentry->d_parent)); dput(dentry); if (!next) break; diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index 3d586f0..e026aa9 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c @@ -10,7 +10,6 @@ */ #include <linux/fs.h> -#include <linux/jbd2.h> #include <linux/cryptohash.h> #include "ext4.h" diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index ac644c3..1eaa6cb 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -14,7 +14,6 @@ #include <linux/time.h> #include <linux/fs.h> -#include <linux/jbd2.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> @@ -444,7 +443,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); if (S_ISDIR(mode) && - ((parent == sb->s_root->d_inode) || + ((parent == d_inode(sb->s_root)) || (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) { int best_ndir = inodes_per_group; int ret = -1; @@ -997,6 +996,12 @@ got: ei->i_block_group = group; ei->i_last_alloc_group = ~0; + /* If the directory encrypted, then we should encrypt the inode. */ + if ((S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) && + (ext4_encrypted_inode(dir) || + DUMMY_ENCRYPTION_ENABLED(sbi))) + ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); + ext4_set_inode_flags(inode); if (IS_DIRSYNC(inode)) ext4_handle_sync(handle); @@ -1029,11 +1034,28 @@ got: ext4_set_inode_state(inode, EXT4_STATE_NEW); ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; - +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if ((sbi->s_file_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID) && + (sbi->s_dir_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID)) { + ei->i_inline_off = 0; + if (EXT4_HAS_INCOMPAT_FEATURE(sb, + EXT4_FEATURE_INCOMPAT_INLINE_DATA)) + ext4_set_inode_state(inode, + EXT4_STATE_MAY_INLINE_DATA); + } else { + /* Inline data and encryption are incompatible + * We turn off inline data since encryption is enabled */ + ei->i_inline_off = 1; + if (EXT4_HAS_INCOMPAT_FEATURE(sb, + EXT4_FEATURE_INCOMPAT_INLINE_DATA)) + ext4_clear_inode_state(inode, + EXT4_STATE_MAY_INLINE_DATA); + } +#else ei->i_inline_off = 0; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA)) ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); - +#endif ret = inode; err = dquot_alloc_inode(inode); if (err) diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 45fe924..9588240 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -20,9 +20,9 @@ * (sct@redhat.com), 1993, 1998 */ -#include <linux/aio.h> #include "ext4_jbd2.h" #include "truncate.h" +#include <linux/uio.h> #include <trace/events/ext4.h> @@ -642,8 +642,8 @@ out: * crashes then stale disk data _may_ be exposed inside the file. But current * VFS code falls back into buffered path in that case so we are safe. 
*/ -ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -654,7 +654,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_iter_count(iter); int retries = 0; - if (rw == WRITE) { + if (iov_iter_rw(iter) == WRITE) { loff_t final_size = offset + count; if (final_size > inode->i_size) { @@ -676,37 +676,38 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, } retry: - if (rw == READ && ext4_should_dioread_nolock(inode)) { + if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) { /* * Nolock dioread optimization may be dynamically disabled * via ext4_inode_block_unlocked_dio(). Check inode's state * while holding extra i_dio_count ref. */ - atomic_inc(&inode->i_dio_count); + inode_dio_begin(inode); smp_mb(); if (unlikely(ext4_test_inode_state(inode, EXT4_STATE_DIOREAD_LOCK))) { - inode_dio_done(inode); + inode_dio_end(inode); goto locked; } if (IS_DAX(inode)) - ret = dax_do_io(rw, iocb, inode, iter, offset, + ret = dax_do_io(iocb, inode, iter, offset, ext4_get_block, NULL, 0); else - ret = __blockdev_direct_IO(rw, iocb, inode, - inode->i_sb->s_bdev, iter, offset, - ext4_get_block, NULL, NULL, 0); - inode_dio_done(inode); + ret = __blockdev_direct_IO(iocb, inode, + inode->i_sb->s_bdev, iter, + offset, ext4_get_block, NULL, + NULL, 0); + inode_dio_end(inode); } else { locked: if (IS_DAX(inode)) - ret = dax_do_io(rw, iocb, inode, iter, offset, + ret = dax_do_io(iocb, inode, iter, offset, ext4_get_block, NULL, DIO_LOCKING); else - ret = blockdev_direct_IO(rw, iocb, inode, iter, - offset, ext4_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, offset, + ext4_get_block); - if (unlikely((rw & WRITE) && ret < 0)) { + if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = offset + count; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 4b143fe..095c7a2 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -11,11 +11,13 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ + +#include <linux/fiemap.h> + #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" #include "truncate.h" -#include <linux/fiemap.h> #define EXT4_XATTR_SYSTEM_DATA "data" #define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS)) @@ -972,7 +974,7 @@ void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh, offset = 0; while ((void *)de < dlimit) { de_len = ext4_rec_len_from_disk(de->rec_len, inline_size); - trace_printk("de: off %u rlen %u name %*.s nlen %u ino %u\n", + trace_printk("de: off %u rlen %u name %.*s nlen %u ino %u\n", offset, de_len, de->name_len, de->name, de->name_len, le32_to_cpu(de->inode)); if (ext4_check_dir_entry(dir, NULL, de, bh, @@ -998,7 +1000,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle, struct ext4_iloc *iloc, void *inline_start, int inline_size) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; int err; @@ -1014,7 +1016,8 @@ static int ext4_add_dirent_to_inline(handle_t *handle, err = ext4_journal_get_write_access(handle, iloc->bh); if (err) return err; - ext4_insert_dentry(inode, de, inline_size, name, namelen); + ext4_insert_dentry(dir, inode, de, inline_size, &dentry->d_name, + name, namelen); ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size); @@ -1251,7 +1254,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry, int ret, inline_size; void *inline_start; struct ext4_iloc iloc; - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); ret = ext4_get_inode_loc(dir, &iloc); if (ret) @@ -1327,6 +1330,7 @@ int htree_inlinedir_to_tree(struct file *dir_file, struct ext4_iloc iloc; void *dir_buf = NULL; struct ext4_dir_entry_2 fake; + struct ext4_str tmp_str; ret = ext4_get_inode_loc(inode, &iloc); if (ret) @@ -1398,8 +1402,10 @@ int htree_inlinedir_to_tree(struct file *dir_file, continue; if (de->inode == 0) continue; - err = ext4_htree_store_dirent(dir_file, - hinfo->hash, hinfo->minor_hash, de); + tmp_str.name = de->name; + tmp_str.len = de->name_len; + err = ext4_htree_store_dirent(dir_file, hinfo->hash, + hinfo->minor_hash, de, &tmp_str); if (err) { count = err; goto out; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5cb9a21..cbd0654 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -20,7 +20,6 @@ #include <linux/fs.h> #include <linux/time.h> -#include <linux/jbd2.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> @@ -36,8 +35,6 @@ #include <linux/kernel.h> #include <linux/printk.h> #include <linux/slab.h> -#include <linux/ratelimit.h> -#include <linux/aio.h> #include <linux/bitops.h> #include "ext4_jbd2.h" @@ -141,7 +138,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, /* * Test whether an inode is a fast symlink. */ -static int ext4_inode_is_fast_symlink(struct inode *inode) +int ext4_inode_is_fast_symlink(struct inode *inode) { int ea_blocks = EXT4_I(inode)->i_file_acl ? 
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; @@ -888,6 +885,95 @@ int do_journal_get_write_access(handle_t *handle, static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); + +#ifdef CONFIG_EXT4_FS_ENCRYPTION +static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, + get_block_t *get_block) +{ + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned to = from + len; + struct inode *inode = page->mapping->host; + unsigned block_start, block_end; + sector_t block; + int err = 0; + unsigned blocksize = inode->i_sb->s_blocksize; + unsigned bbits; + struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; + bool decrypt = false; + + BUG_ON(!PageLocked(page)); + BUG_ON(from > PAGE_CACHE_SIZE); + BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(from > to); + + if (!page_has_buffers(page)) + create_empty_buffers(page, blocksize, 0); + head = page_buffers(page); + bbits = ilog2(blocksize); + block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + + for (bh = head, block_start = 0; bh != head || !block_start; + block++, block_start = block_end, bh = bh->b_this_page) { + block_end = block_start + blocksize; + if (block_end <= from || block_start >= to) { + if (PageUptodate(page)) { + if (!buffer_uptodate(bh)) + set_buffer_uptodate(bh); + } + continue; + } + if (buffer_new(bh)) + clear_buffer_new(bh); + if (!buffer_mapped(bh)) { + WARN_ON(bh->b_size != blocksize); + err = get_block(inode, block, bh, 1); + if (err) + break; + if (buffer_new(bh)) { + unmap_underlying_metadata(bh->b_bdev, + bh->b_blocknr); + if (PageUptodate(page)) { + clear_buffer_new(bh); + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + continue; + } + if (block_end > to || block_start < from) + zero_user_segments(page, to, block_end, + block_start, from); + continue; + } + } + if (PageUptodate(page)) { + if (!buffer_uptodate(bh)) + set_buffer_uptodate(bh); + continue; + } + if (!buffer_uptodate(bh) && !buffer_delay(bh) && + !buffer_unwritten(bh) && + (block_start < from || block_end > to)) { + ll_rw_block(READ, 1, &bh); + *wait_bh++ = bh; + decrypt = ext4_encrypted_inode(inode) && + S_ISREG(inode->i_mode); + } + } + /* + * If we issued read requests, let them complete. 
+ */ + while (wait_bh > wait) { + wait_on_buffer(*--wait_bh); + if (!buffer_uptodate(*wait_bh)) + err = -EIO; + } + if (unlikely(err)) + page_zero_new_buffers(page, from, to); + else if (decrypt) + err = ext4_decrypt_one(inode, page); + return err; +} +#endif + static int ext4_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) @@ -950,11 +1036,19 @@ retry_journal: /* In case writeback began while the page was unlocked */ wait_for_stable_page(page); +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (ext4_should_dioread_nolock(inode)) + ret = ext4_block_write_begin(page, pos, len, + ext4_get_block_write); + else + ret = ext4_block_write_begin(page, pos, len, + ext4_get_block); +#else if (ext4_should_dioread_nolock(inode)) ret = __block_write_begin(page, pos, len, ext4_get_block_write); else ret = __block_write_begin(page, pos, len, ext4_get_block); - +#endif if (!ret && ext4_should_journal_data(inode)) { ret = ext4_walk_page_buffers(handle, page_buffers(page), from, to, NULL, @@ -2576,7 +2670,12 @@ retry_journal: /* In case writeback began while the page was unlocked */ wait_for_stable_page(page); +#ifdef CONFIG_EXT4_FS_ENCRYPTION + ret = ext4_block_write_begin(page, pos, len, + ext4_da_get_block_prep); +#else ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); +#endif if (ret < 0) { unlock_page(page); ext4_journal_stop(handle); @@ -2822,7 +2921,7 @@ static int ext4_readpage(struct file *file, struct page *page) ret = ext4_readpage_inline(inode, page); if (ret == -EAGAIN) - return mpage_readpage(page, ext4_get_block); + return ext4_mpage_readpages(page->mapping, NULL, page, 1); return ret; } @@ -2837,7 +2936,7 @@ ext4_readpages(struct file *file, struct address_space *mapping, if (ext4_has_inline_data(inode)) return 0; - return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); + return ext4_mpage_readpages(mapping, pages, NULL, nr_pages); } static void ext4_invalidatepage(struct page *page, unsigned int offset, @@ -2953,8 +3052,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, * if the machine crashes during the write. * */ -static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -2967,8 +3066,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, ext4_io_end_t *io_end = NULL; /* Use the old path for reads and writes beyond i_size. */ - if (rw != WRITE || final_size > inode->i_size) - return ext4_ind_direct_IO(rw, iocb, iter, offset); + if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size) + return ext4_ind_direct_IO(iocb, iter, offset); BUG_ON(iocb->private == NULL); @@ -2977,8 +3076,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, * conversion. This also disallows race between truncate() and * overwrite DIO as i_dio_count needs to be incremented under i_mutex. 
*/ - if (rw == WRITE) - atomic_inc(&inode->i_dio_count); + if (iov_iter_rw(iter) == WRITE) + inode_dio_begin(inode); /* If we do a overwrite dio, i_mutex locking can be released */ overwrite = *((int *)iocb->private); @@ -3034,11 +3133,14 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, get_block_func = ext4_get_block_write; dio_flags = DIO_LOCKING; } +#ifdef CONFIG_EXT4_FS_ENCRYPTION + BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)); +#endif if (IS_DAX(inode)) - ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func, + ret = dax_do_io(iocb, inode, iter, offset, get_block_func, ext4_end_io_dio, dio_flags); else - ret = __blockdev_direct_IO(rw, iocb, inode, + ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, offset, get_block_func, ext4_end_io_dio, NULL, dio_flags); @@ -3079,8 +3181,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, } retake_lock: - if (rw == WRITE) - inode_dio_done(inode); + if (iov_iter_rw(iter) == WRITE) + inode_dio_end(inode); /* take i_mutex locking again if we do a ovewrite dio */ if (overwrite) { up_read(&EXT4_I(inode)->i_data_sem); @@ -3090,14 +3192,19 @@ retake_lock: return ret; } -static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) + return 0; +#endif + /* * If we are doing data journalling we don't support O_DIRECT */ @@ -3108,12 +3215,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, if (ext4_has_inline_data(inode)) return 0; - trace_ext4_direct_IO_enter(inode, offset, count, rw); + trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) - ret = ext4_ext_direct_IO(rw, iocb, iter, offset); + ret = ext4_ext_direct_IO(iocb, iter, offset); else - ret = ext4_ind_direct_IO(rw, iocb, iter, offset); - trace_ext4_direct_IO_exit(inode, offset, count, rw, ret); + ret = ext4_ind_direct_IO(iocb, iter, offset); + trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); return ret; } @@ -3262,6 +3369,13 @@ static int __ext4_block_zero_page_range(handle_t *handle, /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; + if (S_ISREG(inode->i_mode) && + ext4_encrypted_inode(inode)) { + /* We expect the key to be set. 
*/ + BUG_ON(!ext4_has_encryption_key(inode)); + BUG_ON(blocksize != PAGE_CACHE_SIZE); + WARN_ON_ONCE(ext4_decrypt_one(inode, page)); + } } if (ext4_should_journal_data(inode)) { BUFFER_TRACE(bh, "get write access"); @@ -4091,16 +4205,14 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) if (S_ISREG(inode->i_mode)) { inode->i_op = &ext4_file_inode_operations; - if (test_opt(inode->i_sb, DAX)) - inode->i_fop = &ext4_dax_file_operations; - else - inode->i_fop = &ext4_file_operations; + inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { - if (ext4_inode_is_fast_symlink(inode)) { + if (ext4_inode_is_fast_symlink(inode) && + !ext4_encrypted_inode(inode)) { inode->i_op = &ext4_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); @@ -4525,7 +4637,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) */ int ext4_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error, rc = 0; int orphan = 0; const unsigned int ia_valid = attr->ia_valid; @@ -4673,7 +4785,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, struct inode *inode; unsigned long long delalloc_blocks; - inode = dentry->d_inode; + inode = d_inode(dentry); generic_fillattr(inode, stat); /* diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index f58a0d1..2cb9e17 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -8,12 +8,12 @@ */ #include <linux/fs.h> -#include <linux/jbd2.h> #include <linux/capability.h> #include <linux/time.h> #include <linux/compat.h> #include <linux/mount.h> #include <linux/file.h> +#include <linux/random.h> #include <asm/uaccess.h> #include "ext4_jbd2.h" #include "ext4.h" @@ -196,6 +196,16 @@ journal_err_out: return err; } +static int uuid_is_zero(__u8 u[16]) +{ + int i; + + for (i = 0; i < 16; i++) + if (u[i]) + return 0; + return 1; +} + long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -615,7 +625,78 @@ resizefs_out: } case EXT4_IOC_PRECACHE_EXTENTS: return ext4_ext_precache(inode); + case EXT4_IOC_SET_ENCRYPTION_POLICY: { +#ifdef CONFIG_EXT4_FS_ENCRYPTION + struct ext4_encryption_policy policy; + int err = 0; + + if (copy_from_user(&policy, + (struct ext4_encryption_policy __user *)arg, + sizeof(policy))) { + err = -EFAULT; + goto encryption_policy_out; + } + err = ext4_process_policy(&policy, inode); +encryption_policy_out: + return err; +#else + return -EOPNOTSUPP; +#endif + } + case EXT4_IOC_GET_ENCRYPTION_PWSALT: { + int err, err2; + struct ext4_sb_info *sbi = EXT4_SB(sb); + handle_t *handle; + + if (!ext4_sb_has_crypto(sb)) + return -EOPNOTSUPP; + if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) { + err = mnt_want_write_file(filp); + if (err) + return err; + handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + goto pwsalt_err_exit; + } + err = ext4_journal_get_write_access(handle, sbi->s_sbh); + if (err) + goto pwsalt_err_journal; + generate_random_uuid(sbi->s_es->s_encrypt_pw_salt); + err = ext4_handle_dirty_metadata(handle, NULL, + sbi->s_sbh); + pwsalt_err_journal: + err2 = ext4_journal_stop(handle); + if (err2 && !err) + err = err2; + pwsalt_err_exit: + mnt_drop_write_file(filp); + if (err) + return err; + } + if (copy_to_user((void *) arg, 
sbi->s_es->s_encrypt_pw_salt, + 16)) + return -EFAULT; + return 0; + } + case EXT4_IOC_GET_ENCRYPTION_POLICY: { +#ifdef CONFIG_EXT4_FS_ENCRYPTION + struct ext4_encryption_policy policy; + int err = 0; + + if (!ext4_encrypted_inode(inode)) + return -ENOENT; + err = ext4_get_policy(inode, &policy); + if (err) + return err; + if (copy_to_user((void *)arg, &policy, sizeof(policy))) + return -EFAULT; + return 0; +#else + return -EOPNOTSUPP; +#endif + } default: return -ENOTTY; } @@ -680,6 +761,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FITRIM: case EXT4_IOC_RESIZE_FS: case EXT4_IOC_PRECACHE_EXTENTS: + case EXT4_IOC_SET_ENCRYPTION_POLICY: + case EXT4_IOC_GET_ENCRYPTION_PWSALT: + case EXT4_IOC_GET_ENCRYPTION_POLICY: break; default: return -ENOIOCTLCMD; diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 3cb267a..b52374e 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -475,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode) EXT4_INODES_PER_GROUP(inode->i_sb)) + 1; owner[0] = i_uid_read(inode); owner[1] = i_gid_read(inode); - tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode, + tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root), S_IFREG, NULL, goal, owner); if (IS_ERR(tmp_inode)) { retval = PTR_ERR(tmp_inode); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 28fe71a..7223b0b 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -26,7 +26,6 @@ #include <linux/fs.h> #include <linux/pagemap.h> -#include <linux/jbd2.h> #include <linux/time.h> #include <linux/fcntl.h> #include <linux/stat.h> @@ -254,8 +253,9 @@ static struct dx_frame *dx_probe(const struct qstr *d_name, struct dx_hash_info *hinfo, struct dx_frame *frame); static void dx_release(struct dx_frame *frames); -static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, - struct dx_hash_info *hinfo, struct dx_map_entry map[]); +static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, + unsigned blocksize, struct dx_hash_info *hinfo, + struct dx_map_entry map[]); static void dx_sort_map(struct dx_map_entry *map, unsigned count); static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to, struct dx_map_entry *offsets, int count, unsigned blocksize); @@ -586,8 +586,10 @@ struct stats unsigned bcount; }; -static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de, - int size, int show_names) +static struct stats dx_show_leaf(struct inode *dir, + struct dx_hash_info *hinfo, + struct ext4_dir_entry_2 *de, + int size, int show_names) { unsigned names = 0, space = 0; char *base = (char *) de; @@ -600,12 +602,80 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_ent { if (show_names) { +#ifdef CONFIG_EXT4_FS_ENCRYPTION + int len; + char *name; + struct ext4_str fname_crypto_str + = {.name = NULL, .len = 0}; + struct ext4_fname_crypto_ctx *ctx = NULL; + int res; + + name = de->name; + len = de->name_len; + ctx = ext4_get_fname_crypto_ctx(dir, + EXT4_NAME_LEN); + if (IS_ERR(ctx)) { + printk(KERN_WARNING "Error acquiring" + " crypto ctxt--skipping crypto\n"); + ctx = NULL; + } + if (ctx == NULL) { + /* Directory is not encrypted */ + ext4fs_dirhash(de->name, + de->name_len, &h); + printk("%*.s:(U)%x.%u ", len, + name, h.hash, + (unsigned) ((char *) de + - base)); + } else { + /* Directory is encrypted */ + res = ext4_fname_crypto_alloc_buffer( + ctx, de->name_len, + &fname_crypto_str); + if (res < 0) { + printk(KERN_WARNING "Error " + "allocating crypto " + 
"buffer--skipping " + "crypto\n"); + ext4_put_fname_crypto_ctx(&ctx); + ctx = NULL; + } + res = ext4_fname_disk_to_usr(ctx, de, + &fname_crypto_str); + if (res < 0) { + printk(KERN_WARNING "Error " + "converting filename " + "from disk to usr" + "\n"); + name = "??"; + len = 2; + } else { + name = fname_crypto_str.name; + len = fname_crypto_str.len; + } + res = ext4_fname_disk_to_hash(ctx, de, + &h); + if (res < 0) { + printk(KERN_WARNING "Error " + "converting filename " + "from disk to htree" + "\n"); + h.hash = 0xDEADBEEF; + } + printk("%*.s:(E)%x.%u ", len, name, + h.hash, (unsigned) ((char *) de + - base)); + ext4_put_fname_crypto_ctx(&ctx); + ext4_fname_crypto_free_buffer( + &fname_crypto_str); + } +#else int len = de->name_len; char *name = de->name; - while (len--) printk("%c", *name++); ext4fs_dirhash(de->name, de->name_len, &h); - printk(":%x.%u ", h.hash, + printk("%*.s:%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); +#endif } space += EXT4_DIR_REC_LEN(de->name_len); names++; @@ -623,7 +693,6 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, unsigned count = dx_get_count(entries), names = 0, space = 0, i; unsigned bcount = 0; struct buffer_head *bh; - int err; printk("%i indexed blocks...\n", count); for (i = 0; i < count; i++, entries++) { @@ -637,7 +706,8 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, continue; stats = levels? dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): - dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); + dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) + bh->b_data, blocksize, 0); names += stats.names; space += stats.space; bcount += stats.bcount; @@ -687,8 +757,28 @@ dx_probe(const struct qstr *d_name, struct inode *dir, if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (d_name) { + struct ext4_fname_crypto_ctx *ctx = NULL; + int res; + + /* Check if the directory is encrypted */ + ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); + if (IS_ERR(ctx)) { + ret_err = ERR_PTR(PTR_ERR(ctx)); + goto fail; + } + res = ext4_fname_usr_to_hash(ctx, d_name, hinfo); + if (res < 0) { + ret_err = ERR_PTR(res); + goto fail; + } + ext4_put_fname_crypto_ctx(&ctx); + } +#else if (d_name) ext4fs_dirhash(d_name->name, d_name->len, hinfo); +#endif hash = hinfo->hash; if (root->info.unused_flags & 1) { @@ -773,6 +863,7 @@ fail: brelse(frame->bh); frame--; } + if (ret_err == ERR_PTR(ERR_BAD_DX_DIR)) ext4_warning(dir->i_sb, "Corrupt dir inode %lu, running e2fsck is " @@ -878,6 +969,8 @@ static int htree_dirblock_to_tree(struct file *dir_file, struct buffer_head *bh; struct ext4_dir_entry_2 *de, *top; int err = 0, count = 0; + struct ext4_fname_crypto_ctx *ctx = NULL; + struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}, tmp_str; dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", (unsigned long)block)); @@ -889,6 +982,24 @@ static int htree_dirblock_to_tree(struct file *dir_file, top = (struct ext4_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); +#ifdef CONFIG_EXT4_FS_ENCRYPTION + /* Check if the directory is encrypted */ + ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + brelse(bh); + return err; + } + if (ctx != NULL) { + err = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, + &fname_crypto_str); + 
if (err < 0) { + ext4_put_fname_crypto_ctx(&ctx); + brelse(bh); + return err; + } + } +#endif for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, @@ -897,21 +1008,52 @@ static int htree_dirblock_to_tree(struct file *dir_file, /* silently ignore the rest of the block */ break; } +#ifdef CONFIG_EXT4_FS_ENCRYPTION + err = ext4_fname_disk_to_hash(ctx, de, hinfo); + if (err < 0) { + count = err; + goto errout; + } +#else ext4fs_dirhash(de->name, de->name_len, hinfo); +#endif if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) continue; if (de->inode == 0) continue; - if ((err = ext4_htree_store_dirent(dir_file, - hinfo->hash, hinfo->minor_hash, de)) != 0) { - brelse(bh); - return err; + if (ctx == NULL) { + /* Directory is not encrypted */ + tmp_str.name = de->name; + tmp_str.len = de->name_len; + err = ext4_htree_store_dirent(dir_file, + hinfo->hash, hinfo->minor_hash, de, + &tmp_str); + } else { + /* Directory is encrypted */ + err = ext4_fname_disk_to_usr(ctx, de, + &fname_crypto_str); + if (err < 0) { + count = err; + goto errout; + } + err = ext4_htree_store_dirent(dir_file, + hinfo->hash, hinfo->minor_hash, de, + &fname_crypto_str); + } + if (err != 0) { + count = err; + goto errout; } count++; } +errout: brelse(bh); +#ifdef CONFIG_EXT4_FS_ENCRYPTION + ext4_put_fname_crypto_ctx(&ctx); + ext4_fname_crypto_free_buffer(&fname_crypto_str); +#endif return count; } @@ -935,6 +1077,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, int count = 0; int ret, err; __u32 hashval; + struct ext4_str tmp_str; dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); @@ -970,14 +1113,22 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; - if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0) + tmp_str.name = de->name; + tmp_str.len = de->name_len; + err = ext4_htree_store_dirent(dir_file, 0, 0, + de, &tmp_str); + if (err != 0) goto errout; count++; } if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; de = ext4_next_entry(de, dir->i_sb->s_blocksize); - if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0) + tmp_str.name = de->name; + tmp_str.len = de->name_len; + err = ext4_htree_store_dirent(dir_file, 2, 0, + de, &tmp_str); + if (err != 0) goto errout; count++; } @@ -1035,17 +1186,33 @@ static inline int search_dirblock(struct buffer_head *bh, * Create map of hash values, offsets, and sizes, stored at end of block. * Returns number of entries mapped. 
*/ -static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, - struct dx_hash_info *hinfo, +static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, + unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) { int count = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + struct ext4_fname_crypto_ctx *ctx = NULL; + int err; + + ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); +#endif while ((char *) de < base + blocksize) { if (de->name_len && de->inode) { +#ifdef CONFIG_EXT4_FS_ENCRYPTION + err = ext4_fname_disk_to_hash(ctx, de, &h); + if (err < 0) { + ext4_put_fname_crypto_ctx(&ctx); + return err; + } +#else ext4fs_dirhash(de->name, de->name_len, &h); +#endif map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; @@ -1056,6 +1223,9 @@ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, /* XXX: do we need to check rec_len == 0 case? -Chris */ de = ext4_next_entry(de, blocksize); } +#ifdef CONFIG_EXT4_FS_ENCRYPTION + ext4_put_fname_crypto_ctx(&ctx); +#endif return count; } @@ -1106,57 +1276,107 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) * `len <= EXT4_NAME_LEN' is guaranteed by caller. * `de != NULL' is guaranteed by caller. */ -static inline int ext4_match (int len, const char * const name, - struct ext4_dir_entry_2 * de) +static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx, + struct ext4_str *fname_crypto_str, + int len, const char * const name, + struct ext4_dir_entry_2 *de) { - if (len != de->name_len) - return 0; + int res; + if (!de->inode) return 0; - return !memcmp(name, de->name, len); + +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (ctx) { + /* Directory is encrypted */ + res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str); + if (res < 0) + return res; + if (len != res) + return 0; + res = memcmp(name, fname_crypto_str->name, len); + return (res == 0) ? 1 : 0; + } +#endif + if (len != de->name_len) + return 0; + res = memcmp(name, de->name, len); + return (res == 0) ? 
1 : 0; } /* * Returns 0 if not found, -1 on failure, and 1 on success */ -int search_dir(struct buffer_head *bh, - char *search_buf, - int buf_size, - struct inode *dir, - const struct qstr *d_name, - unsigned int offset, - struct ext4_dir_entry_2 **res_dir) +int search_dir(struct buffer_head *bh, char *search_buf, int buf_size, + struct inode *dir, const struct qstr *d_name, + unsigned int offset, struct ext4_dir_entry_2 **res_dir) { struct ext4_dir_entry_2 * de; char * dlimit; int de_len; const char *name = d_name->name; int namelen = d_name->len; + struct ext4_fname_crypto_ctx *ctx = NULL; + struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; + int res; + + ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); + if (IS_ERR(ctx)) + return -1; + + if (ctx != NULL) { + /* Allocate buffer to hold maximum name length */ + res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, + &fname_crypto_str); + if (res < 0) { + ext4_put_fname_crypto_ctx(&ctx); + return -1; + } + } de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; while ((char *) de < dlimit) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ + if ((char *) de + de->name_len <= dlimit) { + res = ext4_match(ctx, &fname_crypto_str, namelen, + name, de); + if (res < 0) { + res = -1; + goto return_result; + } + if (res > 0) { + /* found a match - just to be sure, do + * a full check */ + if (ext4_check_dir_entry(dir, NULL, de, bh, + bh->b_data, + bh->b_size, offset)) { + res = -1; + goto return_result; + } + *res_dir = de; + res = 1; + goto return_result; + } - if ((char *) de + namelen <= dlimit && - ext4_match (namelen, name, de)) { - /* found a match - just to be sure, do a full check */ - if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, - bh->b_size, offset)) - return -1; - *res_dir = de; - return 1; } /* prevent looping on a bad block */ de_len = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); - if (de_len <= 0) - return -1; + if (de_len <= 0) { + res = -1; + goto return_result; + } offset += de_len; de = (struct ext4_dir_entry_2 *) ((char *) de + de_len); } - return 0; + + res = 0; +return_result: + ext4_put_fname_crypto_ctx(&ctx); + ext4_fname_crypto_free_buffer(&fname_crypto_str); + return res; } static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, @@ -1345,6 +1565,9 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q ext4_lblk_t block; int retval; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + *res_dir = NULL; +#endif frame = dx_probe(d_name, dir, &hinfo, frames); if (IS_ERR(frame)) return (struct buffer_head *) frame; @@ -1417,6 +1640,18 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi ino); return ERR_PTR(-EIO); } + if (!IS_ERR(inode) && ext4_encrypted_inode(dir) && + (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) && + !ext4_is_child_context_consistent_with_parent(dir, + inode)) { + iput(inode); + ext4_warning(inode->i_sb, + "Inconsistent encryption contexts: %lu/%lu\n", + (unsigned long) dir->i_ino, + (unsigned long) inode->i_ino); + return ERR_PTR(-EPERM); + } } return d_splice_alias(inode, dentry); } @@ -1429,7 +1664,7 @@ struct dentry *ext4_get_parent(struct dentry *child) struct ext4_dir_entry_2 * de; struct buffer_head *bh; - bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL); + bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL); if (IS_ERR(bh)) return (struct dentry *) bh; if (!bh) @@ -1437,13 +1672,13 @@ struct dentry 
*ext4_get_parent(struct dentry *child) ino = le32_to_cpu(de->inode); brelse(bh); - if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { - EXT4_ERROR_INODE(child->d_inode, + if (!ext4_valid_inum(d_inode(child)->i_sb, ino)) { + EXT4_ERROR_INODE(d_inode(child), "bad parent inode number: %u", ino); return ERR_PTR(-EIO); } - return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino)); + return d_obtain_alias(ext4_iget_normal(d_inode(child)->i_sb, ino)); } /* @@ -1541,7 +1776,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, /* create map in the end of data2 block */ map = (struct dx_map_entry *) (data2 + blocksize); - count = dx_make_map((struct ext4_dir_entry_2 *) data1, + count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); @@ -1564,7 +1799,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, hash2, split, count-split)); /* Fancy dance to stay within two buffers */ - de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); + de2 = dx_move_dirents(data1, data2, map + split, count - split, + blocksize); de = dx_pack_dirents(data1, blocksize); de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, @@ -1580,8 +1816,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, initialize_dirent_tail(t, blocksize); } - dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); - dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); + dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data1, + blocksize, 1)); + dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2, + blocksize, 1)); /* Which block gets the new entry? */ if (hinfo->hash >= hash2) { @@ -1618,15 +1856,48 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, int nlen, rlen; unsigned int offset = 0; char *top; + struct ext4_fname_crypto_ctx *ctx = NULL; + struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; + int res; + + ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); + if (IS_ERR(ctx)) + return -1; + + if (ctx != NULL) { + /* Calculate record length needed to store the entry */ + res = ext4_fname_crypto_namelen_on_disk(ctx, namelen); + if (res < 0) { + ext4_put_fname_crypto_ctx(&ctx); + return res; + } + reclen = EXT4_DIR_REC_LEN(res); + + /* Allocate buffer to hold maximum name length */ + res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, + &fname_crypto_str); + if (res < 0) { + ext4_put_fname_crypto_ctx(&ctx); + return -1; + } + } de = (struct ext4_dir_entry_2 *)buf; top = buf + buf_size - reclen; while ((char *) de <= top) { if (ext4_check_dir_entry(dir, NULL, de, bh, - buf, buf_size, offset)) - return -EIO; - if (ext4_match(namelen, name, de)) - return -EEXIST; + buf, buf_size, offset)) { + res = -EIO; + goto return_result; + } + /* Provide crypto context and crypto buffer to ext4 match */ + res = ext4_match(ctx, &fname_crypto_str, namelen, name, de); + if (res < 0) + goto return_result; + if (res > 0) { + res = -EEXIST; + goto return_result; + } nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if ((de->inode ? 
rlen - nlen : rlen) >= reclen) @@ -1634,26 +1905,62 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, de = (struct ext4_dir_entry_2 *)((char *)de + rlen); offset += rlen; } - if ((char *) de > top) - return -ENOSPC; - *dest_de = de; - return 0; + if ((char *) de > top) + res = -ENOSPC; + else { + *dest_de = de; + res = 0; + } +return_result: + ext4_put_fname_crypto_ctx(&ctx); + ext4_fname_crypto_free_buffer(&fname_crypto_str); + return res; } -void ext4_insert_dentry(struct inode *inode, - struct ext4_dir_entry_2 *de, - int buf_size, - const char *name, int namelen) +int ext4_insert_dentry(struct inode *dir, + struct inode *inode, + struct ext4_dir_entry_2 *de, + int buf_size, + const struct qstr *iname, + const char *name, int namelen) { int nlen, rlen; + struct ext4_fname_crypto_ctx *ctx = NULL; + struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; + struct ext4_str tmp_str; + int res; + + ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); + if (IS_ERR(ctx)) + return -EIO; + /* By default, the input name would be written to the disk */ + tmp_str.name = (unsigned char *)name; + tmp_str.len = namelen; + if (ctx != NULL) { + /* Directory is encrypted */ + res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, + &fname_crypto_str); + if (res < 0) { + ext4_put_fname_crypto_ctx(&ctx); + return -ENOMEM; + } + res = ext4_fname_usr_to_disk(ctx, iname, &fname_crypto_str); + if (res < 0) { + ext4_put_fname_crypto_ctx(&ctx); + ext4_fname_crypto_free_buffer(&fname_crypto_str); + return res; + } + tmp_str.name = fname_crypto_str.name; + tmp_str.len = fname_crypto_str.len; + } nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if (de->inode) { struct ext4_dir_entry_2 *de1 = - (struct ext4_dir_entry_2 *)((char *)de + nlen); + (struct ext4_dir_entry_2 *)((char *)de + nlen); de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size); de->rec_len = ext4_rec_len_to_disk(nlen, buf_size); de = de1; @@ -1661,9 +1968,14 @@ void ext4_insert_dentry(struct inode *inode, de->file_type = EXT4_FT_UNKNOWN; de->inode = cpu_to_le32(inode->i_ino); ext4_set_de_type(inode->i_sb, de, inode->i_mode); - de->name_len = namelen; - memcpy(de->name, name, namelen); + de->name_len = tmp_str.len; + + memcpy(de->name, tmp_str.name, tmp_str.len); + ext4_put_fname_crypto_ctx(&ctx); + ext4_fname_crypto_free_buffer(&fname_crypto_str); + return 0; } + /* * Add a new entry into a directory (leaf) block. If de is non-NULL, * it points to a directory entry which is guaranteed to be large @@ -1676,7 +1988,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, struct inode *inode, struct ext4_dir_entry_2 *de, struct buffer_head *bh) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned int blocksize = dir->i_sb->s_blocksize; @@ -1700,8 +2012,12 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, return err; } - /* By now the buffer is marked for journaling */ - ext4_insert_dentry(inode, de, blocksize, name, namelen); + /* By now the buffer is marked for journaling. 
Due to crypto operations, + * the following function call may fail */ + err = ext4_insert_dentry(dir, inode, de, blocksize, &dentry->d_name, + name, namelen); + if (err < 0) + return err; /* * XXX shouldn't update any times until successful @@ -1732,9 +2048,14 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct inode *inode, struct buffer_head *bh) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); +#ifdef CONFIG_EXT4_FS_ENCRYPTION + struct ext4_fname_crypto_ctx *ctx = NULL; + int res; +#else const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; +#endif struct buffer_head *bh2; struct dx_root *root; struct dx_frame frames[2], *frame; @@ -1748,7 +2069,13 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct dx_hash_info hinfo; ext4_lblk_t block; struct fake_dirent *fde; - int csum_size = 0; + int csum_size = 0; + +#ifdef CONFIG_EXT4_FS_ENCRYPTION + ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); +#endif if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); @@ -1815,7 +2142,18 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + res = ext4_fname_usr_to_hash(ctx, &dentry->d_name, &hinfo); + if (res < 0) { + ext4_put_fname_crypto_ctx(&ctx); + ext4_mark_inode_dirty(handle, dir); + brelse(bh); + return res; + } + ext4_put_fname_crypto_ctx(&ctx); +#else ext4fs_dirhash(name, namelen, &hinfo); +#endif memset(frames, 0, sizeof(frames)); frame = frames; frame->entries = entries; @@ -1864,8 +2202,8 @@ out_frames: static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; - struct buffer_head *bh; + struct inode *dir = d_inode(dentry->d_parent); + struct buffer_head *bh = NULL; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; struct super_block *sb; @@ -1889,14 +2227,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, return retval; if (retval == 1) { retval = 0; - return retval; + goto out; } } if (is_dx(dir)) { retval = ext4_dx_add_entry(handle, dentry, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) - return retval; + goto out; ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; ext4_mark_inode_dirty(handle, dir); @@ -1908,14 +2246,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, return PTR_ERR(bh); retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); - if (retval != -ENOSPC) { - brelse(bh); - return retval; - } + if (retval != -ENOSPC) + goto out; if (blocks == 1 && !dx_fallback && - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) - return make_indexed_dir(handle, dentry, inode, bh); + EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { + retval = make_indexed_dir(handle, dentry, inode, bh); + bh = NULL; /* make_indexed_dir releases bh */ + goto out; + } brelse(bh); } bh = ext4_append(handle, dir, &block); @@ -1931,6 +2270,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, } retval = add_dirent_to_buf(handle, dentry, inode, de, bh); +out: brelse(bh); if (retval == 0) ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); @@ -1947,7 +2287,7 @@ 
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct dx_entry *entries, *at; struct dx_hash_info hinfo; struct buffer_head *bh; - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); struct super_block *sb = dir->i_sb; struct ext4_dir_entry_2 *de; int err; @@ -2235,12 +2575,22 @@ retry: err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; - if (test_opt(inode->i_sb, DAX)) - inode->i_fop = &ext4_dax_file_operations; - else - inode->i_fop = &ext4_file_operations; + inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); - err = ext4_add_nondir(handle, dentry, inode); + err = 0; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (!err && (ext4_encrypted_inode(dir) || + DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)))) { + err = ext4_inherit_context(dir, inode); + if (err) { + clear_nlink(inode); + unlock_new_inode(inode); + iput(inode); + } + } +#endif + if (!err) + err = ext4_add_nondir(handle, dentry, inode); if (!err && IS_DIRSYNC(dir)) ext4_handle_sync(handle); } @@ -2302,10 +2652,7 @@ retry: err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; - if (test_opt(inode->i_sb, DAX)) - inode->i_fop = &ext4_dax_file_operations; - else - inode->i_fop = &ext4_file_operations; + inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); d_tmpfile(dentry, inode); err = ext4_orphan_add(handle, inode); @@ -2424,6 +2771,14 @@ retry: err = ext4_init_new_dir(handle, dir, inode); if (err) goto out_clear_inode; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (ext4_encrypted_inode(dir) || + DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) { + err = ext4_inherit_context(dir, inode); + if (err) + goto out_clear_inode; + } +#endif err = ext4_mark_inode_dirty(handle, inode); if (!err) err = ext4_add_entry(handle, dentry, inode); @@ -2456,7 +2811,7 @@ out_stop: /* * routine to check that the specified directory is empty (for rmdir) */ -static int empty_dir(struct inode *inode) +int ext4_empty_dir(struct inode *inode) { unsigned int offset; struct buffer_head *bh; @@ -2708,7 +3063,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go in * separate transaction */ dquot_initialize(dir); - dquot_initialize(dentry->d_inode); + dquot_initialize(d_inode(dentry)); retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); @@ -2717,14 +3072,14 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) if (!bh) goto end_rmdir; - inode = dentry->d_inode; + inode = d_inode(dentry); retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_rmdir; retval = -ENOTEMPTY; - if (!empty_dir(inode)) + if (!ext4_empty_dir(inode)) goto end_rmdir; handle = ext4_journal_start(dir, EXT4_HT_DIR, @@ -2777,7 +3132,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go * in separate transaction */ dquot_initialize(dir); - dquot_initialize(dentry->d_inode); + dquot_initialize(d_inode(dentry)); retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); @@ -2786,7 +3141,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (!bh) goto end_unlink; - inode = dentry->d_inode; + inode = d_inode(dentry); retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) @@ -2834,16 +3189,25 @@ static int ext4_symlink(struct inode *dir, { handle_t *handle; struct inode *inode; - int l, err, retries = 0; + int err, len = strlen(symname); int 
credits; - - l = strlen(symname)+1; - if (l > dir->i_sb->s_blocksize) + bool encryption_required; + struct ext4_str disk_link; + struct ext4_encrypted_symlink_data *sd = NULL; + + disk_link.len = len + 1; + disk_link.name = (char *) symname; + + encryption_required = (ext4_encrypted_inode(dir) || + DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))); + if (encryption_required) + disk_link.len = encrypted_symlink_data_len(len) + 1; + if (disk_link.len > dir->i_sb->s_blocksize) return -ENAMETOOLONG; dquot_initialize(dir); - if (l > EXT4_N_BLOCKS * 4) { + if ((disk_link.len > EXT4_N_BLOCKS * 4)) { /* * For non-fast symlinks, we just allocate inode and put it on * orphan list in the first transaction => we need bitmap, @@ -2862,16 +3226,49 @@ static int ext4_symlink(struct inode *dir, credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3; } -retry: + inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); - err = PTR_ERR(inode); - if (IS_ERR(inode)) - goto out_stop; + if (IS_ERR(inode)) { + if (handle) + ext4_journal_stop(handle); + return PTR_ERR(inode); + } + + if (encryption_required) { + struct ext4_fname_crypto_ctx *ctx = NULL; + struct qstr istr; + struct ext4_str ostr; + + sd = kzalloc(disk_link.len, GFP_NOFS); + if (!sd) { + err = -ENOMEM; + goto err_drop_inode; + } + err = ext4_inherit_context(dir, inode); + if (err) + goto err_drop_inode; + ctx = ext4_get_fname_crypto_ctx(inode, + inode->i_sb->s_blocksize); + if (IS_ERR_OR_NULL(ctx)) { + /* We just set the policy, so ctx should not be NULL */ + err = (ctx == NULL) ? -EIO : PTR_ERR(ctx); + goto err_drop_inode; + } + istr.name = (const unsigned char *) symname; + istr.len = len; + ostr.name = sd->encrypted_path; + err = ext4_fname_usr_to_disk(ctx, &istr, &ostr); + ext4_put_fname_crypto_ctx(&ctx); + if (err < 0) + goto err_drop_inode; + sd->len = cpu_to_le16(ostr.len); + disk_link.name = (char *) sd; + } - if (l > EXT4_N_BLOCKS * 4) { + if ((disk_link.len > EXT4_N_BLOCKS * 4)) { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); /* @@ -2887,9 +3284,10 @@ retry: drop_nlink(inode); err = ext4_orphan_add(handle, inode); ext4_journal_stop(handle); + handle = NULL; if (err) goto err_drop_inode; - err = __page_symlink(inode, symname, l, 1); + err = __page_symlink(inode, disk_link.name, disk_link.len, 1); if (err) goto err_drop_inode; /* @@ -2901,34 +3299,37 @@ retry: EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); + handle = NULL; goto err_drop_inode; } set_nlink(inode, 1); err = ext4_orphan_del(handle, inode); - if (err) { - ext4_journal_stop(handle); - clear_nlink(inode); + if (err) goto err_drop_inode; - } } else { /* clear the extent format for fast symlink */ ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); - inode->i_op = &ext4_fast_symlink_inode_operations; - memcpy((char *)&EXT4_I(inode)->i_data, symname, l); - inode->i_size = l-1; + inode->i_op = encryption_required ? 
+ &ext4_symlink_inode_operations : + &ext4_fast_symlink_inode_operations; + memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name, + disk_link.len); + inode->i_size = disk_link.len - 1; } EXT4_I(inode)->i_disksize = inode->i_size; err = ext4_add_nondir(handle, dentry, inode); if (!err && IS_DIRSYNC(dir)) ext4_handle_sync(handle); -out_stop: if (handle) ext4_journal_stop(handle); - if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) - goto retry; + kfree(sd); return err; err_drop_inode: + if (handle) + ext4_journal_stop(handle); + kfree(sd); + clear_nlink(inode); unlock_new_inode(inode); iput(inode); return err; @@ -2938,12 +3339,14 @@ static int ext4_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { handle_t *handle; - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); int err, retries = 0; if (inode->i_nlink >= EXT4_LINK_MAX) return -EMLINK; - + if (ext4_encrypted_inode(dir) && + !ext4_is_child_context_consistent_with_parent(dir, inode)) + return -EPERM; dquot_initialize(dir); retry: @@ -3210,12 +3613,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, struct ext4_renament old = { .dir = old_dir, .dentry = old_dentry, - .inode = old_dentry->d_inode, + .inode = d_inode(old_dentry), }; struct ext4_renament new = { .dir = new_dir, .dentry = new_dentry, - .inode = new_dentry->d_inode, + .inode = d_inode(new_dentry), }; int force_reread; int retval; @@ -3244,6 +3647,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) goto end_rename; + if ((old.dir != new.dir) && + ext4_encrypted_inode(new.dir) && + !ext4_is_child_context_consistent_with_parent(new.dir, + old.inode)) { + retval = -EPERM; + goto end_rename; + } + new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, &new.de, &new.inlined); if (IS_ERR(new.bh)) { @@ -3264,12 +3675,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); if (!(flags & RENAME_WHITEOUT)) { handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + retval = PTR_ERR(handle); + handle = NULL; + goto end_rename; + } } else { whiteout = ext4_whiteout_for_rename(&old, credits, &handle); - if (IS_ERR(whiteout)) - return PTR_ERR(whiteout); + if (IS_ERR(whiteout)) { + retval = PTR_ERR(whiteout); + whiteout = NULL; + goto end_rename; + } } if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) @@ -3278,7 +3695,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, if (S_ISDIR(old.inode->i_mode)) { if (new.inode) { retval = -ENOTEMPTY; - if (!empty_dir(new.inode)) + if (!ext4_empty_dir(new.inode)) goto end_rename; } else { retval = -EMLINK; @@ -3352,8 +3769,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, ext4_dec_count(handle, old.dir); if (new.inode) { - /* checked empty_dir above, can't have another parent, - * ext4_dec_count() won't work for many-linked dirs */ + /* checked ext4_empty_dir above, can't have another + * parent, ext4_dec_count() won't work for many-linked + * dirs */ clear_nlink(new.inode); } else { ext4_inc_count(handle, new.dir); @@ -3391,12 +3809,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, struct ext4_renament old = { .dir = old_dir, .dentry = old_dentry, - .inode = old_dentry->d_inode, + .inode = d_inode(old_dentry), }; struct ext4_renament new = 
{ .dir = new_dir, .dentry = new_dentry, - .inode = new_dentry->d_inode, + .inode = d_inode(new_dentry), }; u8 new_file_type; int retval; @@ -3433,8 +3851,11 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, handle = ext4_journal_start(old.dir, EXT4_HT_DIR, (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + retval = PTR_ERR(handle); + handle = NULL; + goto end_rename; + } if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) ext4_handle_sync(handle); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index b24a254..5765f88 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -8,7 +8,6 @@ #include <linux/fs.h> #include <linux/time.h> -#include <linux/jbd2.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> @@ -18,14 +17,12 @@ #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/namei.h> -#include <linux/aio.h> #include <linux/uio.h> #include <linux/bio.h> #include <linux/workqueue.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> -#include <linux/ratelimit.h> #include "ext4_jbd2.h" #include "xattr.h" @@ -69,6 +66,10 @@ static void ext4_finish_bio(struct bio *bio) bio_for_each_segment_all(bvec, bio, i) { struct page *page = bvec->bv_page; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + struct page *data_page = NULL; + struct ext4_crypto_ctx *ctx = NULL; +#endif struct buffer_head *bh, *head; unsigned bio_start = bvec->bv_offset; unsigned bio_end = bio_start + bvec->bv_len; @@ -78,6 +79,15 @@ static void ext4_finish_bio(struct bio *bio) if (!page) continue; +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (!page->mapping) { + /* The bounce data pages are unmapped. */ + data_page = page; + ctx = (struct ext4_crypto_ctx *)page_private(data_page); + page = ctx->control_page; + } +#endif + if (error) { SetPageError(page); set_bit(AS_EIO, &page->mapping->flags); @@ -102,8 +112,13 @@ static void ext4_finish_bio(struct bio *bio) } while ((bh = bh->b_this_page) != head); bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); local_irq_restore(flags); - if (!under_io) + if (!under_io) { +#ifdef CONFIG_EXT4_FS_ENCRYPTION + if (ctx) + ext4_restore_control_page(data_page); +#endif end_page_writeback(page); + } } } @@ -378,6 +393,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io, static int io_submit_add_bh(struct ext4_io_submit *io, struct inode *inode, + struct page *page, struct buffer_head *bh) { int ret; @@ -391,7 +407,7 @@ submit_and_retry: if (ret) return ret; } - ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); + ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh)); if (ret != bh->b_size) goto submit_and_retry; io->io_next_block++; @@ -404,6 +420,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, struct writeback_control *wbc, bool keep_towrite) { + struct page *data_page = NULL; struct inode *inode = page->mapping->host; unsigned block_start, blocksize; struct buffer_head *bh, *head; @@ -463,19 +480,29 @@ int ext4_bio_write_page(struct ext4_io_submit *io, set_buffer_async_write(bh); } while ((bh = bh->b_this_page) != head); - /* Now submit buffers to write */ bh = head = page_buffers(page); + + if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { + data_page = ext4_encrypt(inode, page); + if (IS_ERR(data_page)) { + ret = PTR_ERR(data_page); + data_page = NULL; + goto out; + } + } + + /* Now submit buffers to write */ do { if (!buffer_async_write(bh)) 
continue; - ret = io_submit_add_bh(io, inode, bh); + ret = io_submit_add_bh(io, inode, + data_page ? data_page : page, bh); if (ret) { /* * We only get here on ENOMEM. Not much else * we can do but mark the page as dirty, and * better luck next time. */ - redirty_page_for_writepage(wbc, page); break; } nr_submitted++; @@ -484,6 +511,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io, /* Error stopped previous loop? Clean up buffers... */ if (ret) { + out: + if (data_page) + ext4_restore_control_page(data_page); + printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret); + redirty_page_for_writepage(wbc, page); do { clear_buffer_async_write(bh); bh = bh->b_this_page; diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c new file mode 100644 index 0000000..171b9ac --- /dev/null +++ b/fs/ext4/readpage.c @@ -0,0 +1,328 @@ +/* + * linux/fs/ext4/readpage.c + * + * Copyright (C) 2002, Linus Torvalds. + * Copyright (C) 2015, Google, Inc. + * + * This was originally taken from fs/mpage.c + * + * The intent is the ext4_mpage_readpages() function here is intended + * to replace mpage_readpages() in the general case, not just for + * encrypted files. It has some limitations (see below), where it + * will fall back to read_block_full_page(), but these limitations + * should only be hit when page_size != block_size. + * + * This will allow us to attach a callback function to support ext4 + * encryption. + * + * If anything unusual happens, such as: + * + * - encountering a page which has buffers + * - encountering a page which has a non-hole after a hole + * - encountering a page with non-contiguous blocks + * + * then this code just gives up and calls the buffer_head-based read function. + * It does handle a page which has holes at the end - that is a common case: + * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. + * + */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/kdev_t.h> +#include <linux/gfp.h> +#include <linux/bio.h> +#include <linux/fs.h> +#include <linux/buffer_head.h> +#include <linux/blkdev.h> +#include <linux/highmem.h> +#include <linux/prefetch.h> +#include <linux/mpage.h> +#include <linux/writeback.h> +#include <linux/backing-dev.h> +#include <linux/pagevec.h> +#include <linux/cleancache.h> + +#include "ext4.h" + +/* + * Call ext4_decrypt on every single page, reusing the encryption + * context. + */ +static void completion_pages(struct work_struct *work) +{ +#ifdef CONFIG_EXT4_FS_ENCRYPTION + struct ext4_crypto_ctx *ctx = + container_of(work, struct ext4_crypto_ctx, work); + struct bio *bio = ctx->bio; + struct bio_vec *bv; + int i; + + bio_for_each_segment_all(bv, bio, i) { + struct page *page = bv->bv_page; + + int ret = ext4_decrypt(ctx, page); + if (ret) { + WARN_ON_ONCE(1); + SetPageError(page); + } else + SetPageUptodate(page); + unlock_page(page); + } + ext4_release_crypto_ctx(ctx); + bio_put(bio); +#else + BUG(); +#endif +} + +static inline bool ext4_bio_encrypted(struct bio *bio) +{ +#ifdef CONFIG_EXT4_FS_ENCRYPTION + return unlikely(bio->bi_private != NULL); +#else + return false; +#endif +} + +/* + * I/O completion handler for multipage BIOs. + * + * The mpage code never puts partial pages into a BIO (except for end-of-file). + * If a page does not map to a contiguous run of blocks then it simply falls + * back to block_read_full_page(). + * + * Why is this? 
If a page's completion depends on a number of different BIOs + * which can complete in any order (or at the same time) then determining the + * status of that page is hard. See end_buffer_async_read() for the details. + * There is no point in duplicating all that complexity. + */ +static void mpage_end_io(struct bio *bio, int err) +{ + struct bio_vec *bv; + int i; + + if (ext4_bio_encrypted(bio)) { + struct ext4_crypto_ctx *ctx = bio->bi_private; + + if (err) { + ext4_release_crypto_ctx(ctx); + } else { + INIT_WORK(&ctx->work, completion_pages); + ctx->bio = bio; + queue_work(ext4_read_workqueue, &ctx->work); + return; + } + } + bio_for_each_segment_all(bv, bio, i) { + struct page *page = bv->bv_page; + + if (!err) { + SetPageUptodate(page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); + } + + bio_put(bio); +} + +int ext4_mpage_readpages(struct address_space *mapping, + struct list_head *pages, struct page *page, + unsigned nr_pages) +{ + struct bio *bio = NULL; + unsigned page_idx; + sector_t last_block_in_bio = 0; + + struct inode *inode = mapping->host; + const unsigned blkbits = inode->i_blkbits; + const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; + const unsigned blocksize = 1 << blkbits; + sector_t block_in_file; + sector_t last_block; + sector_t last_block_in_file; + sector_t blocks[MAX_BUF_PER_PAGE]; + unsigned page_block; + struct block_device *bdev = inode->i_sb->s_bdev; + int length; + unsigned relative_block = 0; + struct ext4_map_blocks map; + + map.m_pblk = 0; + map.m_lblk = 0; + map.m_len = 0; + map.m_flags = 0; + + for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { + int fully_mapped = 1; + unsigned first_hole = blocks_per_page; + + prefetchw(&page->flags); + if (pages) { + page = list_entry(pages->prev, struct page, lru); + list_del(&page->lru); + if (add_to_page_cache_lru(page, mapping, + page->index, GFP_KERNEL)) + goto next_page; + } + + if (page_has_buffers(page)) + goto confused; + + block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + last_block = block_in_file + nr_pages * blocks_per_page; + last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; + if (last_block > last_block_in_file) + last_block = last_block_in_file; + page_block = 0; + + /* + * Map blocks using the previous result first. + */ + if ((map.m_flags & EXT4_MAP_MAPPED) && + block_in_file > map.m_lblk && + block_in_file < (map.m_lblk + map.m_len)) { + unsigned map_offset = block_in_file - map.m_lblk; + unsigned last = map.m_len - map_offset; + + for (relative_block = 0; ; relative_block++) { + if (relative_block == last) { + /* needed? */ + map.m_flags &= ~EXT4_MAP_MAPPED; + break; + } + if (page_block == blocks_per_page) + break; + blocks[page_block] = map.m_pblk + map_offset + + relative_block; + page_block++; + block_in_file++; + } + } + + /* + * Then do more ext4_map_blocks() calls until we are + * done with this page. 
+ */ + while (page_block < blocks_per_page) { + if (block_in_file < last_block) { + map.m_lblk = block_in_file; + map.m_len = last_block - block_in_file; + + if (ext4_map_blocks(NULL, inode, &map, 0) < 0) { + set_error_page: + SetPageError(page); + zero_user_segment(page, 0, + PAGE_CACHE_SIZE); + unlock_page(page); + goto next_page; + } + } + if ((map.m_flags & EXT4_MAP_MAPPED) == 0) { + fully_mapped = 0; + if (first_hole == blocks_per_page) + first_hole = page_block; + page_block++; + block_in_file++; + continue; + } + if (first_hole != blocks_per_page) + goto confused; /* hole -> non-hole */ + + /* Contiguous blocks? */ + if (page_block && blocks[page_block-1] != map.m_pblk-1) + goto confused; + for (relative_block = 0; ; relative_block++) { + if (relative_block == map.m_len) { + /* needed? */ + map.m_flags &= ~EXT4_MAP_MAPPED; + break; + } else if (page_block == blocks_per_page) + break; + blocks[page_block] = map.m_pblk+relative_block; + page_block++; + block_in_file++; + } + } + if (first_hole != blocks_per_page) { + zero_user_segment(page, first_hole << blkbits, + PAGE_CACHE_SIZE); + if (first_hole == 0) { + SetPageUptodate(page); + unlock_page(page); + goto next_page; + } + } else if (fully_mapped) { + SetPageMappedToDisk(page); + } + if (fully_mapped && blocks_per_page == 1 && + !PageUptodate(page) && cleancache_get_page(page) == 0) { + SetPageUptodate(page); + goto confused; + } + + /* + * This page will go to BIO. Do we need to send this + * BIO off first? + */ + if (bio && (last_block_in_bio != blocks[0] - 1)) { + submit_and_realloc: + submit_bio(READ, bio); + bio = NULL; + } + if (bio == NULL) { + struct ext4_crypto_ctx *ctx = NULL; + + if (ext4_encrypted_inode(inode) && + S_ISREG(inode->i_mode)) { + ctx = ext4_get_crypto_ctx(inode); + if (IS_ERR(ctx)) + goto set_error_page; + } + bio = bio_alloc(GFP_KERNEL, + min_t(int, nr_pages, bio_get_nr_vecs(bdev))); + if (!bio) { + if (ctx) + ext4_release_crypto_ctx(ctx); + goto set_error_page; + } + bio->bi_bdev = bdev; + bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); + bio->bi_end_io = mpage_end_io; + bio->bi_private = ctx; + } + + length = first_hole << blkbits; + if (bio_add_page(bio, page, length, 0) < length) + goto submit_and_realloc; + + if (((map.m_flags & EXT4_MAP_BOUNDARY) && + (relative_block == map.m_len)) || + (first_hole != blocks_per_page)) { + submit_bio(READ, bio); + bio = NULL; + } else + last_block_in_bio = blocks[blocks_per_page - 1]; + goto next_page; + confused: + if (bio) { + submit_bio(READ, bio); + bio = NULL; + } + if (!PageUptodate(page)) + block_read_full_page(page, ext4_get_block); + else + unlock_page(page); + next_page: + if (pages) + page_cache_release(page); + } + BUG_ON(pages && !list_empty(pages)); + if (bio) + submit_bio(READ, bio); + return 0; +} diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e061e66..f06d058 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -21,7 +21,6 @@ #include <linux/fs.h> #include <linux/time.h> #include <linux/vmalloc.h> -#include <linux/jbd2.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> @@ -323,22 +322,6 @@ static void save_error_info(struct super_block *sb, const char *func, ext4_commit_super(sb, 1); } -/* - * The del_gendisk() function uninitializes the disk-specific data - * structures, including the bdi structure, without telling anyone - * else. Once this happens, any attempt to call mark_buffer_dirty() - * (for example, by ext4_commit_super), will cause a kernel OOPS. 
- * This is a kludge to prevent these oops until we can put in a proper - * hook in del_gendisk() to inform the VFS and file system layers. - */ -static int block_device_ejected(struct super_block *sb) -{ - struct inode *bd_inode = sb->s_bdev->bd_inode; - struct backing_dev_info *bdi = inode_to_bdi(bd_inode); - - return bdi->dev == NULL; -} - static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) { struct super_block *sb = journal->j_private; @@ -893,6 +876,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) atomic_set(&ei->i_ioend_count, 0); atomic_set(&ei->i_unwritten, 0); INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); +#ifdef CONFIG_EXT4_FS_ENCRYPTION + ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID; +#endif return &ei->vfs_inode; } @@ -1076,7 +1062,7 @@ static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, .quota_off = ext4_quota_off, .quota_sync = dquot_quota_sync, - .get_info = dquot_get_dqinfo, + .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk @@ -1120,7 +1106,7 @@ enum { Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev, Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, - Opt_data_err_abort, Opt_data_err_ignore, + Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, @@ -1211,6 +1197,7 @@ static const match_table_t tokens = { {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, + {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_removed, "check=none"}, /* mount option from ext2/3 */ {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ {Opt_removed, "reservation"}, /* mount option from ext2/3 */ @@ -1412,6 +1399,7 @@ static const struct mount_opts { {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, + {Opt_test_dummy_encryption, 0, MOPT_GTE0}, {Opt_err, 0, 0} }; @@ -1568,7 +1556,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, return -1; } - journal_inode = path.dentry->d_inode; + journal_inode = d_inode(path.dentry); if (!S_ISBLK(journal_inode->i_mode)) { ext4_msg(sb, KERN_ERR, "error: journal path %s " "is not a block device", journal_path); @@ -1588,6 +1576,15 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, } *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg); + } else if (token == Opt_test_dummy_encryption) { +#ifdef CONFIG_EXT4_FS_ENCRYPTION + sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION; + ext4_msg(sb, KERN_WARNING, + "Test dummy encryption mode enabled"); +#else + ext4_msg(sb, KERN_WARNING, + "Test dummy encryption mount option ignored"); +#endif } else if (m->flags & MOPT_DATAJ) { if (is_remount) { if (!sbi->s_journal) @@ -2685,11 +2682,13 @@ static struct attribute *ext4_attrs[] = { EXT4_INFO_ATTR(lazy_itable_init); EXT4_INFO_ATTR(batched_discard); EXT4_INFO_ATTR(meta_bg_resize); +EXT4_INFO_ATTR(encryption); static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(lazy_itable_init), ATTR_LIST(batched_discard), ATTR_LIST(meta_bg_resize), + ATTR_LIST(encryption), NULL, }; @@ -3448,6 +3447,11 @@ static int 
ext4_fill_super(struct super_block *sb, void *data, int silent) if (sb->s_bdev->bd_part) sbi->s_sectors_written_start = part_stat_read(sb->s_bdev->bd_part, sectors[1]); +#ifdef CONFIG_EXT4_FS_ENCRYPTION + /* Modes of operations for file and directory encryption. */ + sbi->s_file_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS; + sbi->s_dir_encryption_mode = EXT4_ENCRYPTION_MODE_INVALID; +#endif /* Cleanup superblock name */ for (cp = sb->s_id; (cp = strchr(cp, '/'));) @@ -3692,6 +3696,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } } + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT) && + es->s_encryption_level) { + ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", + es->s_encryption_level); + goto failed_mount; + } + if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { @@ -4054,6 +4065,13 @@ no_journal: } } + if (unlikely(sbi->s_mount_flags & EXT4_MF_TEST_DUMMY_ENCRYPTION) && + !(sb->s_flags & MS_RDONLY) && + !EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT)) { + EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT); + ext4_commit_super(sb, 1); + } + /* * Get the # of file system overhead blocks from the * superblock if present. @@ -4570,7 +4588,7 @@ static int ext4_commit_super(struct super_block *sb, int sync) struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; - if (!sbh || block_device_ejected(sb)) + if (!sbh) return error; if (buffer_write_io_error(sbh)) { /* @@ -5199,7 +5217,7 @@ static int ext4_write_info(struct super_block *sb, int type) handle_t *handle; /* Data block + inode block */ - handle = ext4_journal_start(sb->s_root->d_inode, EXT4_HT_QUOTA, 2); + handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); @@ -5247,7 +5265,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, * all updates to the file when we bypass pagecache... */ if (EXT4_SB(sb)->s_journal && - ext4_should_journal_data(path->dentry->d_inode)) { + ext4_should_journal_data(d_inode(path->dentry))) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... 
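The two ext4_fill_super() hunks above add the mount-time policy for the encryption feature: a superblock that advertises EXT4_FEATURE_INCOMPAT_ENCRYPT with a non-zero s_encryption_level is rejected, and a read-write mount with test_dummy_encryption set turns the incompat feature on and commits the superblock. A minimal userspace sketch of just that decision logic, with simplified stand-in fields rather than the real ext4 structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the on-disk and in-core state touched above. */
struct model_super {
	bool feature_encrypt;		/* EXT4_FEATURE_INCOMPAT_ENCRYPT */
	unsigned char encryption_level;	/* es->s_encryption_level */
	bool test_dummy_encryption;	/* from the new mount option token */
	bool readonly;			/* MS_RDONLY */
	bool sb_dirty;			/* would trigger ext4_commit_super() */
};

/* Returns 0 on success, -1 for the "failed_mount" path in the diff. */
static int model_mount_encryption_checks(struct model_super *s)
{
	/* Unsupported on-disk encryption level: refuse the mount. */
	if (s->feature_encrypt && s->encryption_level != 0) {
		fprintf(stderr, "Unsupported encryption level %d\n",
			s->encryption_level);
		return -1;
	}

	/* test_dummy_encryption on a writable fs flips the incompat bit. */
	if (s->test_dummy_encryption && !s->readonly && !s->feature_encrypt) {
		s->feature_encrypt = true;
		s->sb_dirty = true;	/* kernel calls ext4_commit_super(sb, 1) */
	}
	return 0;
}

int main(void)
{
	struct model_super s = { .test_dummy_encryption = true };

	if (model_mount_encryption_checks(&s) == 0)
		printf("mount ok, encrypt feature=%d, commit super=%d\n",
		       s.feature_encrypt, s.sb_dirty);
	return 0;
}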
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index ff37119..19f78f2 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -18,22 +18,115 @@ */ #include <linux/fs.h> -#include <linux/jbd2.h> #include <linux/namei.h> #include "ext4.h" #include "xattr.h" +#ifdef CONFIG_EXT4_FS_ENCRYPTION static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct ext4_inode_info *ei = EXT4_I(dentry->d_inode); + struct page *cpage = NULL; + char *caddr, *paddr = NULL; + struct ext4_str cstr, pstr; + struct inode *inode = d_inode(dentry); + struct ext4_fname_crypto_ctx *ctx = NULL; + struct ext4_encrypted_symlink_data *sd; + loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1); + int res; + u32 plen, max_size = inode->i_sb->s_blocksize; + + if (!ext4_encrypted_inode(inode)) + return page_follow_link_light(dentry, nd); + + ctx = ext4_get_fname_crypto_ctx(inode, inode->i_sb->s_blocksize); + if (IS_ERR(ctx)) + return ctx; + + if (ext4_inode_is_fast_symlink(inode)) { + caddr = (char *) EXT4_I(inode)->i_data; + max_size = sizeof(EXT4_I(inode)->i_data); + } else { + cpage = read_mapping_page(inode->i_mapping, 0, NULL); + if (IS_ERR(cpage)) { + ext4_put_fname_crypto_ctx(&ctx); + return cpage; + } + caddr = kmap(cpage); + caddr[size] = 0; + } + + /* Symlink is encrypted */ + sd = (struct ext4_encrypted_symlink_data *)caddr; + cstr.name = sd->encrypted_path; + cstr.len = le32_to_cpu(sd->len); + if ((cstr.len + + sizeof(struct ext4_encrypted_symlink_data) - 1) > + max_size) { + /* Symlink data on the disk is corrupted */ + res = -EIO; + goto errout; + } + plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ? + EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len; + paddr = kmalloc(plen + 1, GFP_NOFS); + if (!paddr) { + res = -ENOMEM; + goto errout; + } + pstr.name = paddr; + res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr); + if (res < 0) + goto errout; + /* Null-terminate the name */ + if (res <= plen) + paddr[res] = '\0'; + nd_set_link(nd, paddr); + ext4_put_fname_crypto_ctx(&ctx); + if (cpage) { + kunmap(cpage); + page_cache_release(cpage); + } + return NULL; +errout: + ext4_put_fname_crypto_ctx(&ctx); + if (cpage) { + kunmap(cpage); + page_cache_release(cpage); + } + kfree(paddr); + return ERR_PTR(res); +} + +static void ext4_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) +{ + struct page *page = cookie; + + if (!page) { + kfree(nd_get_link(nd)); + } else { + kunmap(page); + page_cache_release(page); + } +} +#endif + +static void *ext4_follow_fast_link(struct dentry *dentry, struct nameidata *nd) +{ + struct ext4_inode_info *ei = EXT4_I(d_inode(dentry)); nd_set_link(nd, (char *) ei->i_data); return NULL; } const struct inode_operations ext4_symlink_inode_operations = { .readlink = generic_readlink, +#ifdef CONFIG_EXT4_FS_ENCRYPTION + .follow_link = ext4_follow_link, + .put_link = ext4_put_link, +#else .follow_link = page_follow_link_light, .put_link = page_put_link, +#endif .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, @@ -43,7 +136,7 @@ const struct inode_operations ext4_symlink_inode_operations = { const struct inode_operations ext4_fast_symlink_inode_operations = { .readlink = generic_readlink, - .follow_link = ext4_follow_link, + .follow_link = ext4_follow_fast_link, .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 1e09fc7..16e28c0 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -55,7 +55,6 @@ #include <linux/slab.h> 
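In the encrypted-symlink path added above, ext4_follow_link() treats the stored target as an ext4_encrypted_symlink_data blob (a length field followed by ciphertext) and returns -EIO when the recorded length cannot fit in the space that actually holds the link: the inode's i_data for a fast symlink, otherwise one block. A small standalone sketch of that bounds check follows; the struct layout is inferred from how sd->len and sd->encrypted_path are used in the hunk, and real field widths, packing and endian conversion are left to the kernel headers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Assumed shape of the on-disk blob, inferred from the diff: a length field
 * followed by that many bytes of encrypted path. The real definition lives
 * in the ext4 crypto headers and may be packed differently.
 */
struct model_encrypted_symlink {
	uint32_t len;			/* sd->len, already cpu-endian here */
	char encrypted_path[1];		/* sd->encrypted_path */
};

/* Mirrors: (cstr.len + sizeof(...) - 1) > max_size  =>  corrupted, -EIO */
static int model_symlink_blob_ok(uint32_t len, size_t max_size)
{
	return (size_t)len + sizeof(struct model_encrypted_symlink) - 1
		<= max_size;
}

int main(void)
{
	size_t max_size = 60;	/* e.g. a fast symlink kept in i_data */

	printf("len=40 fits: %d\n", model_symlink_blob_ok(40, max_size));
	printf("len=4096 fits: %d\n", model_symlink_blob_ok(4096, max_size));
	return 0;
}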
#include <linux/mbcache.h> #include <linux/quotaops.h> -#include <linux/rwsem.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" @@ -179,7 +178,7 @@ ext4_xattr_handler(int name_index) /* * Inode operation listxattr() * - * dentry->d_inode->i_mutex: don't care + * d_inode(dentry)->i_mutex: don't care */ ssize_t ext4_listxattr(struct dentry *dentry, char *buffer, size_t size) @@ -424,7 +423,7 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry, static int ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct buffer_head *bh = NULL; int error; struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); @@ -461,7 +460,7 @@ cleanup: static int ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; struct ext4_iloc iloc; @@ -502,7 +501,7 @@ ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { int ret, ret2; - down_read(&EXT4_I(dentry->d_inode)->xattr_sem); + down_read(&EXT4_I(d_inode(dentry))->xattr_sem); ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size); if (ret < 0) goto errout; @@ -515,7 +514,7 @@ ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) goto errout; ret += ret2; errout: - up_read(&EXT4_I(dentry->d_inode)->xattr_sem); + up_read(&EXT4_I(d_inode(dentry))->xattr_sem); return ret; } @@ -639,8 +638,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s) free += EXT4_XATTR_LEN(name_len); } if (i->value) { - if (free < EXT4_XATTR_SIZE(i->value_len) || - free < EXT4_XATTR_LEN(name_len) + + if (free < EXT4_XATTR_LEN(name_len) + EXT4_XATTR_SIZE(i->value_len)) return -ENOSPC; } diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index 29bedf5..ddc0957 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -23,6 +23,7 @@ #define EXT4_XATTR_INDEX_SECURITY 6 #define EXT4_XATTR_INDEX_SYSTEM 7 #define EXT4_XATTR_INDEX_RICHACL 8 +#define EXT4_XATTR_INDEX_ENCRYPTION 9 struct ext4_xattr_header { __le32 h_magic; /* magic number for identification */ @@ -98,6 +99,8 @@ extern const struct xattr_handler ext4_xattr_user_handler; extern const struct xattr_handler ext4_xattr_trusted_handler; extern const struct xattr_handler ext4_xattr_security_handler; +#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c" + extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t); diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c index d2a2006..95d90e0 100644 --- a/fs/ext4/xattr_security.c +++ b/fs/ext4/xattr_security.c @@ -33,7 +33,7 @@ ext4_xattr_security_get(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_SECURITY, + return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY, name, buffer, size); } @@ -43,7 +43,7 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_SECURITY, + return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY, name, value, size, flags); } diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c index 95f1f4a..891ee2d 100644 --- a/fs/ext4/xattr_trusted.c +++ 
b/fs/ext4/xattr_trusted.c @@ -36,7 +36,7 @@ ext4_xattr_trusted_get(struct dentry *dentry, const char *name, void *buffer, { if (strcmp(name, "") == 0) return -EINVAL; - return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED, + return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED, name, buffer, size); } @@ -46,7 +46,7 @@ ext4_xattr_trusted_set(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED, + return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED, name, value, size, flags); } diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c index 0edb7611..6ed932b 100644 --- a/fs/ext4/xattr_user.c +++ b/fs/ext4/xattr_user.c @@ -37,7 +37,7 @@ ext4_xattr_user_get(struct dentry *dentry, const char *name, return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_USER, + return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_USER, name, buffer, size); } @@ -49,7 +49,7 @@ ext4_xattr_user_set(struct dentry *dentry, const char *name, return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_USER, + return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_USER, name, value, size, flags); } diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 94e2d2f..05f0f66 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -1,5 +1,5 @@ config F2FS_FS - tristate "F2FS filesystem support (EXPERIMENTAL)" + tristate "F2FS filesystem support" depends on BLOCK help F2FS is based on Log-structured File System (LFS), which supports diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 7422027..4320ffa 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -351,13 +351,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode, *acl = f2fs_acl_clone(p, GFP_NOFS); if (!*acl) - return -ENOMEM; + goto no_mem; ret = f2fs_acl_create_masq(*acl, mode); - if (ret < 0) { - posix_acl_release(*acl); - return -ENOMEM; - } + if (ret < 0) + goto no_mem_clone; if (ret == 0) { posix_acl_release(*acl); @@ -378,6 +376,12 @@ no_acl: *default_acl = NULL; *acl = NULL; return 0; + +no_mem_clone: + posix_acl_release(*acl); +no_mem: + posix_acl_release(p); + return -ENOMEM; } int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage, diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 7f794b7..a5e17a2 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -276,7 +276,7 @@ continue_unlock: if (!clear_page_dirty_for_io(page)) goto continue_unlock; - if (f2fs_write_meta_page(page, &wbc)) { + if (mapping->a_ops->writepage(page, &wbc)) { unlock_page(page); break; } @@ -464,20 +464,19 @@ static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) void recover_orphan_inodes(struct f2fs_sb_info *sbi) { - block_t start_blk, orphan_blkaddr, i, j; + block_t start_blk, orphan_blocks, i, j; if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) return; set_sbi_flag(sbi, SBI_POR_DOING); - start_blk = __start_cp_addr(sbi) + 1 + - le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); - orphan_blkaddr = __start_sum_addr(sbi) - 1; + start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); + orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); - ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP); + ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP); - for (i = 0; i < orphan_blkaddr; i++) { + for (i = 0; i < orphan_blocks; 
i++) { struct page *page = get_meta_page(sbi, start_blk + i); struct f2fs_orphan_block *orphan_blk; @@ -615,7 +614,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi) unsigned long blk_size = sbi->blocksize; unsigned long long cp1_version = 0, cp2_version = 0; unsigned long long cp_start_blk_no; - unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); + unsigned int cp_blks = 1 + __cp_payload(sbi); block_t cp_blk_no; int i; @@ -796,6 +795,7 @@ retry: * wribacking dentry pages in the freeing inode. */ f2fs_submit_merged_bio(sbi, DATA, WRITE); + cond_resched(); } goto retry; } @@ -884,7 +884,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) __u32 crc32 = 0; void *kaddr; int i; - int cp_payload_blks = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); + int cp_payload_blks = __cp_payload(sbi); /* * This avoids to conduct wrong roll-forward operations and uses @@ -1048,17 +1048,18 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned long long ckpt_ver; - trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); - mutex_lock(&sbi->cp_mutex); if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && - cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT) + (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC)) goto out; if (unlikely(f2fs_cp_error(sbi))) goto out; if (f2fs_readonly(sbi->sb)) goto out; + + trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); + if (block_operations(sbi)) goto out; @@ -1085,6 +1086,10 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) unblock_operations(sbi); stat_inc_cp_count(sbi->stat_info); + + if (cpc->reason == CP_RECOVERY) + f2fs_msg(sbi->sb, KERN_NOTICE, + "checkpoint: version = %llx", ckpt_ver); out: mutex_unlock(&sbi->cp_mutex); trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); @@ -1103,14 +1108,9 @@ void init_ino_entry_info(struct f2fs_sb_info *sbi) im->ino_num = 0; } - /* - * considering 512 blocks in a segment 8 blocks are needed for cp - * and log segment summaries. 
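A note on the init_ino_entry_info() change just below: the comment being removed hard-codes the 512-block example, while the new expression also subtracts __cp_payload(sbi), so blocks consumed by the checkpoint payload are no longer counted as available orphan space. Plugging in the figures from the old comment (512 blocks per segment, 8 blocks for the cp pack and segment summaries, 1020 orphan entries per block), the arithmetic works out as in this sketch; the cp_payload values are illustrative:

#include <stdio.h>

/* Figures taken from the comment being deleted above. */
#define BLOCKS_PER_SEG		512
#define CP_AND_SUMMARY_BLOCKS	8	/* F2FS_CP_PACKS + NR_CURSEG_TYPE */
#define ORPHANS_PER_BLOCK	1020	/* F2FS_ORPHANS_PER_BLOCK */

int main(void)
{
	for (int cp_payload = 0; cp_payload <= 2; cp_payload++)
		printf("cp_payload=%d -> max_orphans=%d\n", cp_payload,
		       (BLOCKS_PER_SEG - CP_AND_SUMMARY_BLOCKS - cp_payload) *
		       ORPHANS_PER_BLOCK);
	/* cp_payload=0 reproduces the old 504 * 1020 = 514080 figure. */
	return 0;
}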
Remaining blocks are used to keep - * orphan entries with the limitation one reserved segment - * for cp pack we can have max 1020*504 orphan entries - */ sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - - NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK; + NR_CURSEG_TYPE - __cp_payload(sbi)) * + F2FS_ORPHANS_PER_BLOCK; } int __init create_checkpoint_caches(void) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 985ed02..b91b0e1 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -12,12 +12,12 @@ #include <linux/f2fs_fs.h> #include <linux/buffer_head.h> #include <linux/mpage.h> -#include <linux/aio.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/prefetch.h> +#include <linux/uio.h> #include "f2fs.h" #include "node.h" @@ -25,6 +25,9 @@ #include "trace.h" #include <trace/events/f2fs.h> +static struct kmem_cache *extent_tree_slab; +static struct kmem_cache *extent_node_slab; + static void f2fs_read_end_io(struct bio *bio, int err) { struct bio_vec *bvec; @@ -197,7 +200,7 @@ alloc_new: * ->node_page * update block addresses in the node page */ -static void __set_data_blkaddr(struct dnode_of_data *dn) +void set_data_blkaddr(struct dnode_of_data *dn) { struct f2fs_node *rn; __le32 *addr_array; @@ -226,7 +229,7 @@ int reserve_new_block(struct dnode_of_data *dn) trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node); dn->data_blkaddr = NEW_ADDR; - __set_data_blkaddr(dn); + set_data_blkaddr(dn); mark_inode_dirty(dn->inode); sync_inode_page(dn); return 0; @@ -248,73 +251,62 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) return err; } -static int check_extent_cache(struct inode *inode, pgoff_t pgofs, - struct buffer_head *bh_result) +static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs, + struct extent_info *ei, struct buffer_head *bh_result) +{ + unsigned int blkbits = sb->s_blocksize_bits; + size_t max_size = bh_result->b_size; + size_t mapped_size; + + clear_buffer_new(bh_result); + map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs); + mapped_size = (ei->fofs + ei->len - pgofs) << blkbits; + bh_result->b_size = min(max_size, mapped_size); +} + +static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs, + struct extent_info *ei) { struct f2fs_inode_info *fi = F2FS_I(inode); pgoff_t start_fofs, end_fofs; block_t start_blkaddr; - if (is_inode_flag_set(fi, FI_NO_EXTENT)) - return 0; - - read_lock(&fi->ext.ext_lock); + read_lock(&fi->ext_lock); if (fi->ext.len == 0) { - read_unlock(&fi->ext.ext_lock); - return 0; + read_unlock(&fi->ext_lock); + return false; } stat_inc_total_hit(inode->i_sb); start_fofs = fi->ext.fofs; end_fofs = fi->ext.fofs + fi->ext.len - 1; - start_blkaddr = fi->ext.blk_addr; + start_blkaddr = fi->ext.blk; if (pgofs >= start_fofs && pgofs <= end_fofs) { - unsigned int blkbits = inode->i_sb->s_blocksize_bits; - size_t count; - - set_buffer_new(bh_result); - map_bh(bh_result, inode->i_sb, - start_blkaddr + pgofs - start_fofs); - count = end_fofs - pgofs + 1; - if (count < (UINT_MAX >> blkbits)) - bh_result->b_size = (count << blkbits); - else - bh_result->b_size = UINT_MAX; - + *ei = fi->ext; stat_inc_read_hit(inode->i_sb); - read_unlock(&fi->ext.ext_lock); - return 1; + read_unlock(&fi->ext_lock); + return true; } - read_unlock(&fi->ext.ext_lock); - return 0; + read_unlock(&fi->ext_lock); + return false; } -void update_extent_cache(struct dnode_of_data *dn) +static bool update_extent_info(struct inode *inode, pgoff_t fofs, + block_t blkaddr) { - struct 
f2fs_inode_info *fi = F2FS_I(dn->inode); - pgoff_t fofs, start_fofs, end_fofs; + struct f2fs_inode_info *fi = F2FS_I(inode); + pgoff_t start_fofs, end_fofs; block_t start_blkaddr, end_blkaddr; int need_update = true; - f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR); - - /* Update the page address in the parent node */ - __set_data_blkaddr(dn); - - if (is_inode_flag_set(fi, FI_NO_EXTENT)) - return; - - fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + - dn->ofs_in_node; - - write_lock(&fi->ext.ext_lock); + write_lock(&fi->ext_lock); start_fofs = fi->ext.fofs; end_fofs = fi->ext.fofs + fi->ext.len - 1; - start_blkaddr = fi->ext.blk_addr; - end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1; + start_blkaddr = fi->ext.blk; + end_blkaddr = fi->ext.blk + fi->ext.len - 1; /* Drop and initialize the matched extent */ if (fi->ext.len == 1 && fofs == start_fofs) @@ -322,24 +314,24 @@ void update_extent_cache(struct dnode_of_data *dn) /* Initial extent */ if (fi->ext.len == 0) { - if (dn->data_blkaddr != NULL_ADDR) { + if (blkaddr != NULL_ADDR) { fi->ext.fofs = fofs; - fi->ext.blk_addr = dn->data_blkaddr; + fi->ext.blk = blkaddr; fi->ext.len = 1; } goto end_update; } /* Front merge */ - if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) { + if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) { fi->ext.fofs--; - fi->ext.blk_addr--; + fi->ext.blk--; fi->ext.len++; goto end_update; } /* Back merge */ - if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) { + if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) { fi->ext.len++; goto end_update; } @@ -351,8 +343,7 @@ void update_extent_cache(struct dnode_of_data *dn) fi->ext.len = fofs - start_fofs; } else { fi->ext.fofs = fofs + 1; - fi->ext.blk_addr = start_blkaddr + - fofs - start_fofs + 1; + fi->ext.blk = start_blkaddr + fofs - start_fofs + 1; fi->ext.len -= fofs - start_fofs + 1; } } else { @@ -366,27 +357,583 @@ void update_extent_cache(struct dnode_of_data *dn) need_update = true; } end_update: - write_unlock(&fi->ext.ext_lock); - if (need_update) - sync_inode_page(dn); + write_unlock(&fi->ext_lock); + return need_update; +} + +static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi, + struct extent_tree *et, struct extent_info *ei, + struct rb_node *parent, struct rb_node **p) +{ + struct extent_node *en; + + en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC); + if (!en) + return NULL; + + en->ei = *ei; + INIT_LIST_HEAD(&en->list); + + rb_link_node(&en->rb_node, parent, p); + rb_insert_color(&en->rb_node, &et->root); + et->count++; + atomic_inc(&sbi->total_ext_node); + return en; +} + +static void __detach_extent_node(struct f2fs_sb_info *sbi, + struct extent_tree *et, struct extent_node *en) +{ + rb_erase(&en->rb_node, &et->root); + et->count--; + atomic_dec(&sbi->total_ext_node); + + if (et->cached_en == en) + et->cached_en = NULL; +} + +static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi, + nid_t ino) +{ + struct extent_tree *et; + + down_read(&sbi->extent_tree_lock); + et = radix_tree_lookup(&sbi->extent_tree_root, ino); + if (!et) { + up_read(&sbi->extent_tree_lock); + return NULL; + } + atomic_inc(&et->refcount); + up_read(&sbi->extent_tree_lock); + + return et; +} + +static struct extent_tree *__grab_extent_tree(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree *et; + nid_t ino = inode->i_ino; + + down_write(&sbi->extent_tree_lock); + et = radix_tree_lookup(&sbi->extent_tree_root, ino); + if (!et) 
{ + et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS); + f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et); + memset(et, 0, sizeof(struct extent_tree)); + et->ino = ino; + et->root = RB_ROOT; + et->cached_en = NULL; + rwlock_init(&et->lock); + atomic_set(&et->refcount, 0); + et->count = 0; + sbi->total_ext_tree++; + } + atomic_inc(&et->refcount); + up_write(&sbi->extent_tree_lock); + + return et; +} + +static struct extent_node *__lookup_extent_tree(struct extent_tree *et, + unsigned int fofs) +{ + struct rb_node *node = et->root.rb_node; + struct extent_node *en; + + if (et->cached_en) { + struct extent_info *cei = &et->cached_en->ei; + + if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) + return et->cached_en; + } + + while (node) { + en = rb_entry(node, struct extent_node, rb_node); + + if (fofs < en->ei.fofs) { + node = node->rb_left; + } else if (fofs >= en->ei.fofs + en->ei.len) { + node = node->rb_right; + } else { + et->cached_en = en; + return en; + } + } + return NULL; +} + +static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi, + struct extent_tree *et, struct extent_node *en) +{ + struct extent_node *prev; + struct rb_node *node; + + node = rb_prev(&en->rb_node); + if (!node) + return NULL; + + prev = rb_entry(node, struct extent_node, rb_node); + if (__is_back_mergeable(&en->ei, &prev->ei)) { + en->ei.fofs = prev->ei.fofs; + en->ei.blk = prev->ei.blk; + en->ei.len += prev->ei.len; + __detach_extent_node(sbi, et, prev); + return prev; + } + return NULL; +} + +static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi, + struct extent_tree *et, struct extent_node *en) +{ + struct extent_node *next; + struct rb_node *node; + + node = rb_next(&en->rb_node); + if (!node) + return NULL; + + next = rb_entry(node, struct extent_node, rb_node); + if (__is_front_mergeable(&en->ei, &next->ei)) { + en->ei.len += next->ei.len; + __detach_extent_node(sbi, et, next); + return next; + } + return NULL; +} + +static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, + struct extent_tree *et, struct extent_info *ei, + struct extent_node **den) +{ + struct rb_node **p = &et->root.rb_node; + struct rb_node *parent = NULL; + struct extent_node *en; + + while (*p) { + parent = *p; + en = rb_entry(parent, struct extent_node, rb_node); + + if (ei->fofs < en->ei.fofs) { + if (__is_front_mergeable(ei, &en->ei)) { + f2fs_bug_on(sbi, !den); + en->ei.fofs = ei->fofs; + en->ei.blk = ei->blk; + en->ei.len += ei->len; + *den = __try_back_merge(sbi, et, en); + return en; + } + p = &(*p)->rb_left; + } else if (ei->fofs >= en->ei.fofs + en->ei.len) { + if (__is_back_mergeable(ei, &en->ei)) { + f2fs_bug_on(sbi, !den); + en->ei.len += ei->len; + *den = __try_front_merge(sbi, et, en); + return en; + } + p = &(*p)->rb_right; + } else { + f2fs_bug_on(sbi, 1); + } + } + + return __attach_extent_node(sbi, et, ei, parent, p); +} + +static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi, + struct extent_tree *et, bool free_all) +{ + struct rb_node *node, *next; + struct extent_node *en; + unsigned int count = et->count; + + node = rb_first(&et->root); + while (node) { + next = rb_next(node); + en = rb_entry(node, struct extent_node, rb_node); + + if (free_all) { + spin_lock(&sbi->extent_lock); + if (!list_empty(&en->list)) + list_del_init(&en->list); + spin_unlock(&sbi->extent_lock); + } + + if (free_all || list_empty(&en->list)) { + __detach_extent_node(sbi, et, en); + kmem_cache_free(extent_node_slab, en); + } + node = next; + } + + return count - 
et->count; +} + +static void f2fs_init_extent_tree(struct inode *inode, + struct f2fs_extent *i_ext) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree *et; + struct extent_node *en; + struct extent_info ei; + + if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN) + return; + + et = __grab_extent_tree(inode); + + write_lock(&et->lock); + if (et->count) + goto out; + + set_extent_info(&ei, le32_to_cpu(i_ext->fofs), + le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len)); + + en = __insert_extent_tree(sbi, et, &ei, NULL); + if (en) { + et->cached_en = en; + + spin_lock(&sbi->extent_lock); + list_add_tail(&en->list, &sbi->extent_list); + spin_unlock(&sbi->extent_lock); + } +out: + write_unlock(&et->lock); + atomic_dec(&et->refcount); +} + +static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, + struct extent_info *ei) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree *et; + struct extent_node *en; + + trace_f2fs_lookup_extent_tree_start(inode, pgofs); + + et = __find_extent_tree(sbi, inode->i_ino); + if (!et) + return false; + + read_lock(&et->lock); + en = __lookup_extent_tree(et, pgofs); + if (en) { + *ei = en->ei; + spin_lock(&sbi->extent_lock); + if (!list_empty(&en->list)) + list_move_tail(&en->list, &sbi->extent_list); + spin_unlock(&sbi->extent_lock); + stat_inc_read_hit(sbi->sb); + } + stat_inc_total_hit(sbi->sb); + read_unlock(&et->lock); + + trace_f2fs_lookup_extent_tree_end(inode, pgofs, en); + + atomic_dec(&et->refcount); + return en ? true : false; +} + +static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs, + block_t blkaddr) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree *et; + struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL; + struct extent_node *den = NULL; + struct extent_info ei, dei; + unsigned int endofs; + + trace_f2fs_update_extent_tree(inode, fofs, blkaddr); + + et = __grab_extent_tree(inode); + + write_lock(&et->lock); + + /* 1. lookup and remove existing extent info in cache */ + en = __lookup_extent_tree(et, fofs); + if (!en) + goto update_extent; + + dei = en->ei; + __detach_extent_node(sbi, et, en); + + /* 2. if extent can be split more, split and insert the left part */ + if (dei.len > 1) { + /* insert left part of split extent into cache */ + if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) { + set_extent_info(&ei, dei.fofs, dei.blk, + fofs - dei.fofs); + en1 = __insert_extent_tree(sbi, et, &ei, NULL); + } + + /* insert right part of split extent into cache */ + endofs = dei.fofs + dei.len - 1; + if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) { + set_extent_info(&ei, fofs + 1, + fofs - dei.fofs + dei.blk, endofs - fofs); + en2 = __insert_extent_tree(sbi, et, &ei, NULL); + } + } + +update_extent: + /* 3. update extent in extent cache */ + if (blkaddr) { + set_extent_info(&ei, fofs, blkaddr, 1); + en3 = __insert_extent_tree(sbi, et, &ei, &den); + } + + /* 4. update in global extent list */ + spin_lock(&sbi->extent_lock); + if (en && !list_empty(&en->list)) + list_del(&en->list); + /* + * en1 and en2 split from en, they will become more and more smaller + * fragments after splitting several times. So if the length is smaller + * than F2FS_MIN_EXTENT_LEN, we will not add them into extent tree. 
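To make the splitting rule described in this comment concrete, consider f2fs_update_extent_tree() with the new F2FS_MIN_EXTENT_LEN of 64 and a made-up cached extent dei = {fofs 0, blk 1000, len 128} whose block at fofs 80 is rewritten to blkaddr 5000: the old extent is detached, the left remainder {0, 1000, 80} is re-inserted because 80 - 0 >= 64, the right remainder is dropped because 127 - 80 = 47 < 64, and the fresh single-block extent {80, 5000, 1} is inserted. A toy program running only that length arithmetic:

#include <stdio.h>

#define F2FS_MIN_EXTENT_LEN 64	/* new minimum length from this series */

struct ext { unsigned int fofs, blk, len; };

/* Report which pieces of @dei survive when @fofs is remapped to @blkaddr. */
static void model_split(struct ext dei, unsigned int fofs, unsigned int blkaddr)
{
	unsigned int endofs = dei.fofs + dei.len - 1;

	if (dei.len > 1 && fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)
		printf("keep left  {%u, %u, %u}\n",
		       dei.fofs, dei.blk, fofs - dei.fofs);
	if (dei.len > 1 && endofs - fofs >= F2FS_MIN_EXTENT_LEN)
		printf("keep right {fofs %u, len %u}\n",
		       fofs + 1, endofs - fofs);
	if (blkaddr)
		printf("insert new {%u, %u, 1}\n", fofs, blkaddr);
}

int main(void)
{
	struct ext dei = { .fofs = 0, .blk = 1000, .len = 128 };

	model_split(dei, 80, 5000);	/* left kept, right dropped */
	return 0;
}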
+ */ + if (en1) + list_add_tail(&en1->list, &sbi->extent_list); + if (en2) + list_add_tail(&en2->list, &sbi->extent_list); + if (en3) { + if (list_empty(&en3->list)) + list_add_tail(&en3->list, &sbi->extent_list); + else + list_move_tail(&en3->list, &sbi->extent_list); + } + if (den && !list_empty(&den->list)) + list_del(&den->list); + spin_unlock(&sbi->extent_lock); + + /* 5. release extent node */ + if (en) + kmem_cache_free(extent_node_slab, en); + if (den) + kmem_cache_free(extent_node_slab, den); + + write_unlock(&et->lock); + atomic_dec(&et->refcount); +} + +void f2fs_preserve_extent_tree(struct inode *inode) +{ + struct extent_tree *et; + struct extent_info *ext = &F2FS_I(inode)->ext; + bool sync = false; + + if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE)) + return; + + et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino); + if (!et) { + if (ext->len) { + ext->len = 0; + update_inode_page(inode); + } + return; + } + + read_lock(&et->lock); + if (et->count) { + struct extent_node *en; + + if (et->cached_en) { + en = et->cached_en; + } else { + struct rb_node *node = rb_first(&et->root); + + if (!node) + node = rb_last(&et->root); + en = rb_entry(node, struct extent_node, rb_node); + } + + if (__is_extent_same(ext, &en->ei)) + goto out; + + *ext = en->ei; + sync = true; + } else if (ext->len) { + ext->len = 0; + sync = true; + } +out: + read_unlock(&et->lock); + atomic_dec(&et->refcount); + + if (sync) + update_inode_page(inode); +} + +void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink) +{ + struct extent_tree *treevec[EXT_TREE_VEC_SIZE]; + struct extent_node *en, *tmp; + unsigned long ino = F2FS_ROOT_INO(sbi); + struct radix_tree_iter iter; + void **slot; + unsigned int found; + unsigned int node_cnt = 0, tree_cnt = 0; + + if (!test_opt(sbi, EXTENT_CACHE)) + return; + + if (available_free_memory(sbi, EXTENT_CACHE)) + return; + + spin_lock(&sbi->extent_lock); + list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) { + if (!nr_shrink--) + break; + list_del_init(&en->list); + } + spin_unlock(&sbi->extent_lock); + + down_read(&sbi->extent_tree_lock); + while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root, + (void **)treevec, ino, EXT_TREE_VEC_SIZE))) { + unsigned i; + + ino = treevec[found - 1]->ino + 1; + for (i = 0; i < found; i++) { + struct extent_tree *et = treevec[i]; + + atomic_inc(&et->refcount); + write_lock(&et->lock); + node_cnt += __free_extent_tree(sbi, et, false); + write_unlock(&et->lock); + atomic_dec(&et->refcount); + } + } + up_read(&sbi->extent_tree_lock); + + down_write(&sbi->extent_tree_lock); + radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter, + F2FS_ROOT_INO(sbi)) { + struct extent_tree *et = (struct extent_tree *)*slot; + + if (!atomic_read(&et->refcount) && !et->count) { + radix_tree_delete(&sbi->extent_tree_root, et->ino); + kmem_cache_free(extent_tree_slab, et); + sbi->total_ext_tree--; + tree_cnt++; + } + } + up_write(&sbi->extent_tree_lock); + + trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt); +} + +void f2fs_destroy_extent_tree(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree *et; + unsigned int node_cnt = 0; + + if (!test_opt(sbi, EXTENT_CACHE)) + return; + + et = __find_extent_tree(sbi, inode->i_ino); + if (!et) + goto out; + + /* free all extent info belong to this extent tree */ + write_lock(&et->lock); + node_cnt = __free_extent_tree(sbi, et, true); + write_unlock(&et->lock); + + atomic_dec(&et->refcount); + + /* try to find and delete extent tree entry in 
radix tree */ + down_write(&sbi->extent_tree_lock); + et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino); + if (!et) { + up_write(&sbi->extent_tree_lock); + goto out; + } + f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count); + radix_tree_delete(&sbi->extent_tree_root, inode->i_ino); + kmem_cache_free(extent_tree_slab, et); + sbi->total_ext_tree--; + up_write(&sbi->extent_tree_lock); +out: + trace_f2fs_destroy_extent_tree(inode, node_cnt); return; } +void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext) +{ + if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE)) + f2fs_init_extent_tree(inode, i_ext); + + write_lock(&F2FS_I(inode)->ext_lock); + get_extent_info(&F2FS_I(inode)->ext, *i_ext); + write_unlock(&F2FS_I(inode)->ext_lock); +} + +static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, + struct extent_info *ei) +{ + if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) + return false; + + if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE)) + return f2fs_lookup_extent_tree(inode, pgofs, ei); + + return lookup_extent_info(inode, pgofs, ei); +} + +void f2fs_update_extent_cache(struct dnode_of_data *dn) +{ + struct f2fs_inode_info *fi = F2FS_I(dn->inode); + pgoff_t fofs; + + f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR); + + if (is_inode_flag_set(fi, FI_NO_EXTENT)) + return; + + fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + + dn->ofs_in_node; + + if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE)) + return f2fs_update_extent_tree(dn->inode, fofs, + dn->data_blkaddr); + + if (update_extent_info(dn->inode, fofs, dn->data_blkaddr)) + sync_inode_page(dn); +} + struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) { struct address_space *mapping = inode->i_mapping; struct dnode_of_data dn; struct page *page; + struct extent_info ei; int err; struct f2fs_io_info fio = { .type = DATA, .rw = sync ? READ_SYNC : READA, }; + /* + * If sync is false, it needs to check its block allocation. + * This is need and triggered by two flows: + * gc and truncate_partial_data_page. 
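A recurring pattern in the data-path hunks below (find_data_page(), get_lock_data_page(), __get_data_block()) is to consult the extent cache before walking the node tree: on a hit the block address is simply ei.blk + index - ei.fofs and get_dnode_of_data() is skipped. A reduced sketch of that address arithmetic, with extent_info cut down to its three fields and no locking or tree walk:

#include <stdio.h>

/* Reduced extent_info: file offset, start block, length (see f2fs.h below). */
struct model_extent { unsigned int fofs, blk, len; };

/* Return the mapped block for @index, or 0 (NULL_ADDR-like) on a cache miss. */
static unsigned int model_lookup(const struct model_extent *ei,
				 unsigned int index)
{
	if (index >= ei->fofs && index < ei->fofs + ei->len)
		return ei->blk + index - ei->fofs;
	return 0;	/* fall back to get_dnode_of_data() in the real code */
}

int main(void)
{
	struct model_extent ei = { .fofs = 16, .blk = 2048, .len = 64 };

	printf("index 20 -> blk %u\n", model_lookup(&ei, 20));	/* 2052 */
	printf("index 100 -> blk %u\n", model_lookup(&ei, 100));	/* miss: 0 */
	return 0;
}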
+ */ + if (!sync) + goto search; + page = find_get_page(mapping, index); if (page && PageUptodate(page)) return page; f2fs_put_page(page, 0); +search: + if (f2fs_lookup_extent_cache(inode, index, &ei)) { + dn.data_blkaddr = ei.blk + index - ei.fofs; + goto got_it; + } set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, index, LOOKUP_NODE); @@ -401,6 +948,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) if (unlikely(dn.data_blkaddr == NEW_ADDR)) return ERR_PTR(-EINVAL); +got_it: page = grab_cache_page(mapping, index); if (!page) return ERR_PTR(-ENOMEM); @@ -435,6 +983,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index) struct address_space *mapping = inode->i_mapping; struct dnode_of_data dn; struct page *page; + struct extent_info ei; int err; struct f2fs_io_info fio = { .type = DATA, @@ -445,6 +994,11 @@ repeat: if (!page) return ERR_PTR(-ENOMEM); + if (f2fs_lookup_extent_cache(inode, index, &ei)) { + dn.data_blkaddr = ei.blk + index - ei.fofs; + goto got_it; + } + set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, index, LOOKUP_NODE); if (err) { @@ -458,6 +1012,7 @@ repeat: return ERR_PTR(-ENOENT); } +got_it: if (PageUptodate(page)) return page; @@ -569,19 +1124,26 @@ static int __allocate_data_block(struct dnode_of_data *dn) if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) return -EPERM; + + dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); + if (dn->data_blkaddr == NEW_ADDR) + goto alloc; + if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) return -ENOSPC; +alloc: get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page) seg = CURSEG_DIRECT_IO; - allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg); + allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr, + &sum, seg); /* direct IO doesn't use extent cache to maximize the performance */ - __set_data_blkaddr(dn); + set_data_blkaddr(dn); /* update i_size */ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + @@ -615,7 +1177,10 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset, end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); while (dn.ofs_in_node < end_offset && len) { - if (dn.data_blkaddr == NULL_ADDR) { + block_t blkaddr; + + blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); + if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) { if (__allocate_data_block(&dn)) goto sync_out; allocated = true; @@ -659,13 +1224,16 @@ static int __get_data_block(struct inode *inode, sector_t iblock, int mode = create ? 
ALLOC_NODE : LOOKUP_NODE_RA; pgoff_t pgofs, end_offset; int err = 0, ofs = 1; + struct extent_info ei; bool allocated = false; /* Get the page offset from the block offset(iblock) */ pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits)); - if (check_extent_cache(inode, pgofs, bh_result)) + if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) { + f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result); goto out; + } if (create) f2fs_lock_op(F2FS_I_SB(inode)); @@ -682,7 +1250,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock, goto put_out; if (dn.data_blkaddr != NULL_ADDR) { - set_buffer_new(bh_result); + clear_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, dn.data_blkaddr); } else if (create) { err = __allocate_data_block(&dn); @@ -727,6 +1295,7 @@ get_next: if (err) goto sync_out; allocated = true; + set_buffer_new(bh_result); blkaddr = dn.data_blkaddr; } /* Give more consecutive addresses for the readahead */ @@ -813,8 +1382,10 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio) fio->blk_addr = dn.data_blkaddr; /* This page is already truncated */ - if (fio->blk_addr == NULL_ADDR) + if (fio->blk_addr == NULL_ADDR) { + ClearPageUptodate(page); goto out_writepage; + } set_page_writeback(page); @@ -827,10 +1398,15 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio) need_inplace_update(inode))) { rewrite_data_page(page, fio); set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE); + trace_f2fs_do_write_data_page(page, IPU); } else { write_data_page(page, &dn, fio); - update_extent_cache(&dn); + set_data_blkaddr(&dn); + f2fs_update_extent_cache(&dn); + trace_f2fs_do_write_data_page(page, OPU); set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); + if (page->index == 0) + set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); } out_writepage: f2fs_put_dnode(&dn); @@ -909,6 +1485,8 @@ done: clear_cold_data(page); out: inode_dec_dirty_pages(inode); + if (err) + ClearPageUptodate(page); unlock_page(page); if (need_balance_fs) f2fs_balance_fs(sbi); @@ -935,7 +1513,6 @@ static int f2fs_write_data_pages(struct address_space *mapping, { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - bool locked = false; int ret; long diff; @@ -950,15 +1527,13 @@ static int f2fs_write_data_pages(struct address_space *mapping, available_free_memory(sbi, DIRTY_DENTS)) goto skip_write; + /* during POR, we don't need to trigger writepage at all. 
*/ + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) + goto skip_write; + diff = nr_pages_to_write(sbi, DATA, wbc); - if (!S_ISDIR(inode->i_mode)) { - mutex_lock(&sbi->writepages); - locked = true; - } ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); - if (locked) - mutex_unlock(&sbi->writepages); f2fs_submit_merged_bio(sbi, DATA, WRITE); @@ -1118,12 +1693,12 @@ static int f2fs_write_end(struct file *file, return copied; } -static int check_direct_IO(struct inode *inode, int rw, - struct iov_iter *iter, loff_t offset) +static int check_direct_IO(struct inode *inode, struct iov_iter *iter, + loff_t offset) { unsigned blocksize_mask = inode->i_sb->s_blocksize - 1; - if (rw == READ) + if (iov_iter_rw(iter) == READ) return 0; if (offset & blocksize_mask) @@ -1135,8 +1710,8 @@ static int check_direct_IO(struct inode *inode, int rw, return 0; } -static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -1151,19 +1726,19 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, return err; } - if (check_direct_IO(inode, rw, iter, offset)) + if (check_direct_IO(inode, iter, offset)) return 0; - trace_f2fs_direct_IO_enter(inode, offset, count, rw); + trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); - if (rw & WRITE) + if (iov_iter_rw(iter) == WRITE) __allocate_data_blocks(inode, offset, count); - err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block); - if (err < 0 && (rw & WRITE)) + err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block); + if (err < 0 && iov_iter_rw(iter) == WRITE) f2fs_write_failed(mapping, offset + count); - trace_f2fs_direct_IO_exit(inode, offset, count, rw, err); + trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err); return err; } @@ -1236,6 +1811,37 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, get_data_block); } +void init_extent_cache_info(struct f2fs_sb_info *sbi) +{ + INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO); + init_rwsem(&sbi->extent_tree_lock); + INIT_LIST_HEAD(&sbi->extent_list); + spin_lock_init(&sbi->extent_lock); + sbi->total_ext_tree = 0; + atomic_set(&sbi->total_ext_node, 0); +} + +int __init create_extent_cache(void) +{ + extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree", + sizeof(struct extent_tree)); + if (!extent_tree_slab) + return -ENOMEM; + extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node", + sizeof(struct extent_node)); + if (!extent_node_slab) { + kmem_cache_destroy(extent_tree_slab); + return -ENOMEM; + } + return 0; +} + +void destroy_extent_cache(void) +{ + kmem_cache_destroy(extent_node_slab); + kmem_cache_destroy(extent_tree_slab); +} + const struct address_space_operations f2fs_dblock_aops = { .readpage = f2fs_read_data_page, .readpages = f2fs_read_data_pages, diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index e671373..f5388f3 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -35,6 +35,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) /* validation check of the segment numbers */ si->hit_ext = sbi->read_hit_ext; si->total_ext = sbi->total_hit_ext; + si->ext_tree = sbi->total_ext_tree; + si->ext_node = atomic_read(&sbi->total_ext_node); si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); si->ndirty_dent = get_pages(sbi, 
F2FS_DIRTY_DENTS); si->ndirty_dirs = sbi->n_dirty_dirs; @@ -185,6 +187,9 @@ get_cache: si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry); for (i = 0; i <= UPDATE_INO; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); + si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree); + si->cache_mem += atomic_read(&sbi->total_ext_node) * + sizeof(struct extent_node); si->page_mem = 0; npages = NODE_MAPPING(sbi)->nrpages; @@ -260,13 +265,20 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, "CP calls: %d\n", si->cp_count); seq_printf(s, "GC calls: %d (BG: %d)\n", si->call_count, si->bg_gc); - seq_printf(s, " - data segments : %d\n", si->data_segs); - seq_printf(s, " - node segments : %d\n", si->node_segs); - seq_printf(s, "Try to move %d blocks\n", si->tot_blks); - seq_printf(s, " - data blocks : %d\n", si->data_blks); - seq_printf(s, " - node blocks : %d\n", si->node_blks); + seq_printf(s, " - data segments : %d (%d)\n", + si->data_segs, si->bg_data_segs); + seq_printf(s, " - node segments : %d (%d)\n", + si->node_segs, si->bg_node_segs); + seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks, + si->bg_data_blks + si->bg_node_blks); + seq_printf(s, " - data blocks : %d (%d)\n", si->data_blks, + si->bg_data_blks); + seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks, + si->bg_node_blks); seq_printf(s, "\nExtent Hit Ratio: %d / %d\n", si->hit_ext, si->total_ext); + seq_printf(s, "\nExtent Tree Count: %d\n", si->ext_tree); + seq_printf(s, "\nExtent Node Count: %d\n", si->ext_node); seq_puts(s, "\nBalancing F2FS Async:\n"); seq_printf(s, " - inmem: %4d, wb: %4d\n", si->inmem_pages, si->wb_pages); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index b74097a..3a3302a 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -59,9 +59,8 @@ static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFLNK >> S_SHIFT] = F2FS_FT_SYMLINK, }; -void set_de_type(struct f2fs_dir_entry *de, struct inode *inode) +void set_de_type(struct f2fs_dir_entry *de, umode_t mode) { - umode_t mode = inode->i_mode; de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT]; } @@ -127,22 +126,19 @@ struct f2fs_dir_entry *find_target_dentry(struct qstr *name, int *max_slots, *max_slots = 0; while (bit_pos < d->max) { if (!test_bit_le(bit_pos, d->bitmap)) { - if (bit_pos == 0) - max_len = 1; - else if (!test_bit_le(bit_pos - 1, d->bitmap)) - max_len++; bit_pos++; + max_len++; continue; } + de = &d->dentry[bit_pos]; if (early_match_name(name->len, namehash, de) && !memcmp(d->filename[bit_pos], name->name, name->len)) goto found; - if (max_slots && *max_slots >= 0 && max_len > *max_slots) { + if (max_slots && max_len > *max_slots) *max_slots = max_len; - max_len = 0; - } + max_len = 0; /* remain bug on condition */ if (unlikely(!de->name_len)) @@ -219,14 +215,14 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, unsigned int max_depth; unsigned int level; + *res_page = NULL; + if (f2fs_has_inline_dentry(dir)) return find_in_inline_dir(dir, child, res_page); if (npages == 0) return NULL; - *res_page = NULL; - name_hash = f2fs_dentry_hash(child); max_depth = F2FS_I(dir)->i_current_depth; @@ -285,7 +281,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, lock_page(page); f2fs_wait_on_page_writeback(page, type); de->ino = cpu_to_le32(inode->i_ino); - set_de_type(de, inode); + set_de_type(de, inode->i_mode); f2fs_dentry_kunmap(dir, page); set_page_dirty(page); dir->i_mtime = dir->i_ctime = CURRENT_TIME; @@ -331,14 +327,14 @@ void 
do_make_empty_dir(struct inode *inode, struct inode *parent, de->hash_code = 0; de->ino = cpu_to_le32(inode->i_ino); memcpy(d->filename[0], ".", 1); - set_de_type(de, inode); + set_de_type(de, inode->i_mode); de = &d->dentry[1]; de->hash_code = 0; de->name_len = cpu_to_le16(2); de->ino = cpu_to_le32(parent->i_ino); memcpy(d->filename[1], "..", 2); - set_de_type(de, inode); + set_de_type(de, parent->i_mode); test_and_set_bit_le(0, (void *)d->bitmap); test_and_set_bit_le(1, (void *)d->bitmap); @@ -435,7 +431,7 @@ error: void update_parent_metadata(struct inode *dir, struct inode *inode, unsigned int current_depth) { - if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { + if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { if (S_ISDIR(inode->i_mode)) { inc_nlink(dir); set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); @@ -450,7 +446,7 @@ void update_parent_metadata(struct inode *dir, struct inode *inode, set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); } - if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) + if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) clear_inode_flag(F2FS_I(inode), FI_INC_LINK); } @@ -474,30 +470,47 @@ next: goto next; } +void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, + const struct qstr *name, f2fs_hash_t name_hash, + unsigned int bit_pos) +{ + struct f2fs_dir_entry *de; + int slots = GET_DENTRY_SLOTS(name->len); + int i; + + de = &d->dentry[bit_pos]; + de->hash_code = name_hash; + de->name_len = cpu_to_le16(name->len); + memcpy(d->filename[bit_pos], name->name, name->len); + de->ino = cpu_to_le32(ino); + set_de_type(de, mode); + for (i = 0; i < slots; i++) + test_and_set_bit_le(bit_pos + i, (void *)d->bitmap); +} + /* * Caller should grab and release a rwsem by calling f2fs_lock_op() and * f2fs_unlock_op(). 
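f2fs_update_dentry() above is the piece the regular and inline dentry paths now share: it fills one f2fs_dir_entry and then sets GET_DENTRY_SLOTS(name->len) consecutive bits in the block's bitmap, since a long name occupies several filename slots. A toy sketch of the slot reservation only; the 8-byte slot width is an assumption about GET_DENTRY_SLOTS() rather than something spelled out in this hunk:

#include <stdio.h>
#include <string.h>

#define SLOT_LEN 8	/* assumed bytes of name per dentry slot */
#define NSLOTS   64	/* toy bitmap size */

static unsigned char bitmap[NSLOTS / 8];

static int slots_for(size_t name_len)
{
	return (int)((name_len + SLOT_LEN - 1) / SLOT_LEN);
}

/* Mirror the loop in f2fs_update_dentry(): set @slots bits from @bit_pos. */
static void reserve_slots(int bit_pos, int slots)
{
	for (int i = 0; i < slots; i++)
		bitmap[(bit_pos + i) / 8] |= 1u << ((bit_pos + i) % 8);
}

int main(void)
{
	const char *name = "a_fairly_long_filename.txt";
	int slots = slots_for(strlen(name));

	reserve_slots(3, slots);
	printf("\"%s\" (%zu bytes) needs %d slots\n",
	       name, strlen(name), slots);
	return 0;
}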
*/ int __f2fs_add_link(struct inode *dir, const struct qstr *name, - struct inode *inode) + struct inode *inode, nid_t ino, umode_t mode) { unsigned int bit_pos; unsigned int level; unsigned int current_depth; unsigned long bidx, block; f2fs_hash_t dentry_hash; - struct f2fs_dir_entry *de; unsigned int nbucket, nblock; size_t namelen = name->len; struct page *dentry_page = NULL; struct f2fs_dentry_block *dentry_blk = NULL; + struct f2fs_dentry_ptr d; int slots = GET_DENTRY_SLOTS(namelen); - struct page *page; + struct page *page = NULL; int err = 0; - int i; if (f2fs_has_inline_dentry(dir)) { - err = f2fs_add_inline_entry(dir, name, inode); + err = f2fs_add_inline_entry(dir, name, inode, ino, mode); if (!err || err != -EAGAIN) return err; else @@ -547,30 +560,31 @@ start: add_dentry: f2fs_wait_on_page_writeback(dentry_page, DATA); - down_write(&F2FS_I(inode)->i_sem); - page = init_inode_metadata(inode, dir, name, NULL); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto fail; + if (inode) { + down_write(&F2FS_I(inode)->i_sem); + page = init_inode_metadata(inode, dir, name, NULL); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto fail; + } } - de = &dentry_blk->dentry[bit_pos]; - de->hash_code = dentry_hash; - de->name_len = cpu_to_le16(namelen); - memcpy(dentry_blk->filename[bit_pos], name->name, name->len); - de->ino = cpu_to_le32(inode->i_ino); - set_de_type(de, inode); - for (i = 0; i < slots; i++) - test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); + + make_dentry_ptr(&d, (void *)dentry_blk, 1); + f2fs_update_dentry(ino, mode, &d, name, dentry_hash, bit_pos); + set_page_dirty(dentry_page); - /* we don't need to mark_inode_dirty now */ - F2FS_I(inode)->i_pino = dir->i_ino; - update_inode(inode, page); - f2fs_put_page(page, 1); + if (inode) { + /* we don't need to mark_inode_dirty now */ + F2FS_I(inode)->i_pino = dir->i_ino; + update_inode(inode, page); + f2fs_put_page(page, 1); + } update_parent_metadata(dir, inode, current_depth); fail: - up_write(&F2FS_I(inode)->i_sem); + if (inode) + up_write(&F2FS_I(inode)->i_sem); if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) { update_inode_page(dir); @@ -669,6 +683,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, if (bit_pos == NR_DENTRY_IN_BLOCK) { truncate_hole(dir, page->index, page->index + 1); clear_page_dirty_for_io(page); + ClearPagePrivate(page); ClearPageUptodate(page); inode_dec_dirty_pages(dir); } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 7fa3313..d8921cf 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -50,6 +50,7 @@ #define F2FS_MOUNT_FLUSH_MERGE 0x00000400 #define F2FS_MOUNT_NOBARRIER 0x00000800 #define F2FS_MOUNT_FASTBOOT 0x00001000 +#define F2FS_MOUNT_EXTENT_CACHE 0x00002000 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) @@ -102,6 +103,7 @@ enum { CP_UMOUNT, CP_FASTBOOT, CP_SYNC, + CP_RECOVERY, CP_DISCARD, }; @@ -216,6 +218,15 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size, #define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4) #define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5) +/* + * should be same as XFS_IOC_GOINGDOWN. 
+ * Flags for going down operation used by FS_IOC_GOINGDOWN + */ +#define F2FS_IOC_SHUTDOWN _IOR('X', 125, __u32) /* Shutdown */ +#define F2FS_GOING_DOWN_FULLSYNC 0x0 /* going down with full sync */ +#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */ +#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */ + #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* * ioctl commands in 32 bit emulation @@ -273,14 +284,34 @@ enum { #define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */ +/* vector size for gang look-up from extent cache that consists of radix tree */ +#define EXT_TREE_VEC_SIZE 64 + /* for in-memory extent cache entry */ -#define F2FS_MIN_EXTENT_LEN 16 /* minimum extent length */ +#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */ + +/* number of extent info in extent cache we try to shrink */ +#define EXTENT_CACHE_SHRINK_NUMBER 128 struct extent_info { - rwlock_t ext_lock; /* rwlock for consistency */ - unsigned int fofs; /* start offset in a file */ - u32 blk_addr; /* start block address of the extent */ - unsigned int len; /* length of the extent */ + unsigned int fofs; /* start offset in a file */ + u32 blk; /* start block address of the extent */ + unsigned int len; /* length of the extent */ +}; + +struct extent_node { + struct rb_node rb_node; /* rb node located in rb-tree */ + struct list_head list; /* node in global extent list of sbi */ + struct extent_info ei; /* extent info */ +}; + +struct extent_tree { + nid_t ino; /* inode number */ + struct rb_root root; /* root of extent info rb-tree */ + struct extent_node *cached_en; /* recently accessed extent node */ + rwlock_t lock; /* protect extent info rb-tree */ + atomic_t refcount; /* reference count of rb-tree */ + unsigned int count; /* # of extent node in rb-tree*/ }; /* @@ -309,6 +340,7 @@ struct f2fs_inode_info { nid_t i_xattr_nid; /* node id that contains xattrs */ unsigned long long xattr_ver; /* cp version of xattr modification */ struct extent_info ext; /* in-memory extent cache entry */ + rwlock_t ext_lock; /* rwlock for single extent cache */ struct inode_entry *dirty_dir; /* the pointer of dirty dir */ struct radix_tree_root inmem_root; /* radix tree for inmem pages */ @@ -319,21 +351,51 @@ struct f2fs_inode_info { static inline void get_extent_info(struct extent_info *ext, struct f2fs_extent i_ext) { - write_lock(&ext->ext_lock); ext->fofs = le32_to_cpu(i_ext.fofs); - ext->blk_addr = le32_to_cpu(i_ext.blk_addr); + ext->blk = le32_to_cpu(i_ext.blk); ext->len = le32_to_cpu(i_ext.len); - write_unlock(&ext->ext_lock); } static inline void set_raw_extent(struct extent_info *ext, struct f2fs_extent *i_ext) { - read_lock(&ext->ext_lock); i_ext->fofs = cpu_to_le32(ext->fofs); - i_ext->blk_addr = cpu_to_le32(ext->blk_addr); + i_ext->blk = cpu_to_le32(ext->blk); i_ext->len = cpu_to_le32(ext->len); - read_unlock(&ext->ext_lock); +} + +static inline void set_extent_info(struct extent_info *ei, unsigned int fofs, + u32 blk, unsigned int len) +{ + ei->fofs = fofs; + ei->blk = blk; + ei->len = len; +} + +static inline bool __is_extent_same(struct extent_info *ei1, + struct extent_info *ei2) +{ + return (ei1->fofs == ei2->fofs && ei1->blk == ei2->blk && + ei1->len == ei2->len); +} + +static inline bool __is_extent_mergeable(struct extent_info *back, + struct extent_info *front) +{ + return (back->fofs + back->len == front->fofs && + back->blk + back->len == front->blk); +} + +static inline bool __is_back_mergeable(struct extent_info *cur, + struct extent_info *back) +{ + return 
__is_extent_mergeable(back, cur); +} + +static inline bool __is_front_mergeable(struct extent_info *cur, + struct extent_info *front) +{ + return __is_extent_mergeable(cur, front); } struct f2fs_nm_info { @@ -502,6 +564,10 @@ enum page_type { META, NR_PAGE_TYPE, META_FLUSH, + INMEM, /* the below types are used by tracepoints only. */ + INMEM_DROP, + IPU, + OPU, }; struct f2fs_io_info { @@ -559,7 +625,6 @@ struct f2fs_sb_info { struct mutex cp_mutex; /* checkpoint procedure lock */ struct rw_semaphore cp_rwsem; /* blocking FS operations */ struct rw_semaphore node_write; /* locking node writes */ - struct mutex writepages; /* mutex for writepages() */ wait_queue_head_t cp_wait; struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ @@ -571,6 +636,14 @@ struct f2fs_sb_info { struct list_head dir_inode_list; /* dir inode list */ spinlock_t dir_inode_lock; /* for dir inode list lock */ + /* for extent tree cache */ + struct radix_tree_root extent_tree_root;/* cache extent cache entries */ + struct rw_semaphore extent_tree_lock; /* locking extent radix tree */ + struct list_head extent_list; /* lru list for shrinker */ + spinlock_t extent_lock; /* locking extent lru list */ + int total_ext_tree; /* extent tree count */ + atomic_t total_ext_node; /* extent info count */ + /* basic filesystem units */ unsigned int log_sectors_per_block; /* log2 sectors per block */ unsigned int log_blocksize; /* log2 block size */ @@ -920,12 +993,17 @@ static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) return 0; } +static inline block_t __cp_payload(struct f2fs_sb_info *sbi) +{ + return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); +} + static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); int offset; - if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload) > 0) { + if (__cp_payload(sbi) > 0) { if (flag == NAT_BITMAP) return &ckpt->sit_nat_version_bitmap; else @@ -1166,8 +1244,10 @@ enum { FI_NEED_IPU, /* used for ipu per file */ FI_ATOMIC_FILE, /* indicate atomic file */ FI_VOLATILE_FILE, /* indicate volatile file */ + FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ FI_DROP_CACHE, /* drop dirty page cache */ FI_DATA_EXIST, /* indicate data exists */ + FI_INLINE_DOTS, /* indicate inline dot dentries */ }; static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag) @@ -1204,6 +1284,8 @@ static inline void get_inline_info(struct f2fs_inode_info *fi, set_inode_flag(fi, FI_INLINE_DENTRY); if (ri->i_inline & F2FS_DATA_EXIST) set_inode_flag(fi, FI_DATA_EXIST); + if (ri->i_inline & F2FS_INLINE_DOTS) + set_inode_flag(fi, FI_INLINE_DOTS); } static inline void set_raw_inline(struct f2fs_inode_info *fi, @@ -1219,6 +1301,8 @@ static inline void set_raw_inline(struct f2fs_inode_info *fi, ri->i_inline |= F2FS_INLINE_DENTRY; if (is_inode_flag_set(fi, FI_DATA_EXIST)) ri->i_inline |= F2FS_DATA_EXIST; + if (is_inode_flag_set(fi, FI_INLINE_DOTS)) + ri->i_inline |= F2FS_INLINE_DOTS; } static inline int f2fs_has_inline_xattr(struct inode *inode) @@ -1264,6 +1348,11 @@ static inline int f2fs_exist_data(struct inode *inode) return is_inode_flag_set(F2FS_I(inode), FI_DATA_EXIST); } +static inline int f2fs_has_inline_dots(struct inode *inode) +{ + return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DOTS); +} + static inline bool f2fs_is_atomic_file(struct inode *inode) { return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE); @@ -1274,6 +1363,11 @@ static inline bool f2fs_is_volatile_file(struct inode *inode) 
return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE); } +static inline bool f2fs_is_first_block_written(struct inode *inode) +{ + return is_inode_flag_set(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); +} + static inline bool f2fs_is_drop_cache(struct inode *inode) { return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE); @@ -1290,12 +1384,6 @@ static inline int f2fs_has_inline_dentry(struct inode *inode) return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DENTRY); } -static inline void *inline_dentry_addr(struct page *page) -{ - struct f2fs_inode *ri = F2FS_INODE(page); - return (void *)&(ri->i_addr[1]); -} - static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page) { if (!f2fs_has_inline_dentry(dir)) @@ -1363,7 +1451,7 @@ struct dentry *f2fs_get_parent(struct dentry *child); * dir.c */ extern unsigned char f2fs_filetype_table[F2FS_FT_MAX]; -void set_de_type(struct f2fs_dir_entry *, struct inode *); +void set_de_type(struct f2fs_dir_entry *, umode_t); struct f2fs_dir_entry *find_target_dentry(struct qstr *, int *, struct f2fs_dentry_ptr *); bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *, @@ -1382,7 +1470,10 @@ ino_t f2fs_inode_by_name(struct inode *, struct qstr *); void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, struct page *, struct inode *); int update_dent_inode(struct inode *, const struct qstr *); -int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *); +void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *, + const struct qstr *, f2fs_hash_t , unsigned int); +int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t, + umode_t); void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *, struct inode *); int f2fs_do_tmpfile(struct inode *, struct inode *); @@ -1391,8 +1482,8 @@ bool f2fs_empty_dir(struct inode *); static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) { - return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name, - inode); + return __f2fs_add_link(d_inode(dentry->d_parent), &dentry->d_name, + inode, inode->i_ino, inode->i_mode); } /* @@ -1519,14 +1610,22 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, struct f2fs_io_info *); void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, struct f2fs_io_info *); +void set_data_blkaddr(struct dnode_of_data *); int reserve_new_block(struct dnode_of_data *); int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); -void update_extent_cache(struct dnode_of_data *); +void f2fs_shrink_extent_tree(struct f2fs_sb_info *, int); +void f2fs_destroy_extent_tree(struct inode *); +void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *); +void f2fs_update_extent_cache(struct dnode_of_data *); +void f2fs_preserve_extent_tree(struct inode *); struct page *find_data_page(struct inode *, pgoff_t, bool); struct page *get_lock_data_page(struct inode *, pgoff_t); struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); int do_write_data_page(struct page *, struct f2fs_io_info *); int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64); +void init_extent_cache_info(struct f2fs_sb_info *); +int __init create_extent_cache(void); +void destroy_extent_cache(void); void f2fs_invalidate_page(struct page *, unsigned int, unsigned int); int f2fs_release_page(struct page *, gfp_t); @@ -1554,7 +1653,7 @@ struct f2fs_stat_info { struct f2fs_sb_info *sbi; int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; int 
main_area_segs, main_area_sections, main_area_zones; - int hit_ext, total_ext; + int hit_ext, total_ext, ext_tree, ext_node; int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta; int nats, dirty_nats, sits, dirty_sits, fnids; int total_count, utilization; @@ -1566,7 +1665,9 @@ struct f2fs_stat_info { int dirty_count, node_pages, meta_pages; int prefree_count, call_count, cp_count; int tot_segs, node_segs, data_segs, free_segs, free_secs; + int bg_node_segs, bg_data_segs; int tot_blks, data_blks, node_blks; + int bg_data_blks, bg_node_blks; int curseg[NR_CURSEG_TYPE]; int cursec[NR_CURSEG_TYPE]; int curzone[NR_CURSEG_TYPE]; @@ -1615,31 +1716,36 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) ((sbi)->block_count[(curseg)->alloc_type]++) #define stat_inc_inplace_blocks(sbi) \ (atomic_inc(&(sbi)->inplace_count)) -#define stat_inc_seg_count(sbi, type) \ +#define stat_inc_seg_count(sbi, type, gc_type) \ do { \ struct f2fs_stat_info *si = F2FS_STAT(sbi); \ (si)->tot_segs++; \ - if (type == SUM_TYPE_DATA) \ + if (type == SUM_TYPE_DATA) { \ si->data_segs++; \ - else \ + si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \ + } else { \ si->node_segs++; \ + si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \ + } \ } while (0) #define stat_inc_tot_blk_count(si, blks) \ (si->tot_blks += (blks)) -#define stat_inc_data_blk_count(sbi, blks) \ +#define stat_inc_data_blk_count(sbi, blks, gc_type) \ do { \ struct f2fs_stat_info *si = F2FS_STAT(sbi); \ stat_inc_tot_blk_count(si, blks); \ si->data_blks += (blks); \ + si->bg_data_blks += (gc_type == BG_GC) ? (blks) : 0; \ } while (0) -#define stat_inc_node_blk_count(sbi, blks) \ +#define stat_inc_node_blk_count(sbi, blks, gc_type) \ do { \ struct f2fs_stat_info *si = F2FS_STAT(sbi); \ stat_inc_tot_blk_count(si, blks); \ si->node_blks += (blks); \ + si->bg_node_blks += (gc_type == BG_GC) ? 
(blks) : 0; \ } while (0) int f2fs_build_stats(struct f2fs_sb_info *); @@ -1661,10 +1767,10 @@ void f2fs_destroy_root_stats(void); #define stat_inc_seg_type(sbi, curseg) #define stat_inc_block_count(sbi, curseg) #define stat_inc_inplace_blocks(sbi) -#define stat_inc_seg_count(si, type) +#define stat_inc_seg_count(sbi, type, gc_type) #define stat_inc_tot_blk_count(si, blks) -#define stat_inc_data_blk_count(si, blks) -#define stat_inc_node_blk_count(sbi, blks) +#define stat_inc_data_blk_count(sbi, blks, gc_type) +#define stat_inc_node_blk_count(sbi, blks, gc_type) static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } @@ -1688,6 +1794,7 @@ extern struct kmem_cache *inode_entry_slab; */ bool f2fs_may_inline(struct inode *); void read_inline_data(struct page *, struct page *); +bool truncate_inline_inode(struct page *, u64); int f2fs_read_inline_data(struct inode *, struct page *); int f2fs_convert_inline_page(struct dnode_of_data *, struct page *); int f2fs_convert_inline_inode(struct inode *); @@ -1697,7 +1804,8 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *, struct qstr *, struct page **); struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *, struct page **); int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *); -int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *); +int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *, + nid_t, umode_t); void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *, struct inode *, struct inode *); bool f2fs_empty_inline_dir(struct inode *); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 98dac27..2b52e48 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -241,6 +241,8 @@ go_write: * will be used only for fsynced inodes after checkpoint. 
*/ try_to_fix_pino(inode); + clear_inode_flag(fi, FI_APPEND_WRITE); + clear_inode_flag(fi, FI_UPDATE_WRITE); goto out; } sync_nodes: @@ -433,8 +435,12 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count) continue; dn->data_blkaddr = NULL_ADDR; - update_extent_cache(dn); + set_data_blkaddr(dn); + f2fs_update_extent_cache(dn); invalidate_blocks(sbi, blkaddr); + if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page)) + clear_inode_flag(F2FS_I(dn->inode), + FI_FIRST_BLOCK_WRITTEN); nr_free++; } if (nr_free) { @@ -454,15 +460,16 @@ void truncate_data_blocks(struct dnode_of_data *dn) truncate_data_blocks_range(dn, ADDRS_PER_BLOCK); } -static int truncate_partial_data_page(struct inode *inode, u64 from) +static int truncate_partial_data_page(struct inode *inode, u64 from, + bool force) { unsigned offset = from & (PAGE_CACHE_SIZE - 1); struct page *page; - if (!offset) + if (!offset && !force) return 0; - page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false); + page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, force); if (IS_ERR(page)) return 0; @@ -473,7 +480,8 @@ static int truncate_partial_data_page(struct inode *inode, u64 from) f2fs_wait_on_page_writeback(page, DATA); zero_user(page, offset, PAGE_CACHE_SIZE - offset); - set_page_dirty(page); + if (!force) + set_page_dirty(page); out: f2fs_put_page(page, 1); return 0; @@ -487,6 +495,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) pgoff_t free_from; int count = 0, err = 0; struct page *ipage; + bool truncate_page = false; trace_f2fs_truncate_blocks_enter(inode, from); @@ -502,7 +511,10 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) } if (f2fs_has_inline_data(inode)) { + if (truncate_inline_inode(ipage, from)) + set_page_dirty(ipage); f2fs_put_page(ipage, 1); + truncate_page = true; goto out; } @@ -533,7 +545,7 @@ out: /* lastly zero out the first data page */ if (!err) - err = truncate_partial_data_page(inode, from); + err = truncate_partial_data_page(inode, from, truncate_page); trace_f2fs_truncate_blocks_exit(inode, err); return err; @@ -562,7 +574,7 @@ void f2fs_truncate(struct inode *inode) int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); generic_fillattr(inode, stat); stat->blocks <<= 3; return 0; @@ -601,7 +613,7 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr) int f2fs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct f2fs_inode_info *fi = F2FS_I(inode); int err; @@ -997,6 +1009,9 @@ static int f2fs_ioc_release_volatile_write(struct file *filp) if (!f2fs_is_volatile_file(inode)) return 0; + if (!f2fs_is_first_block_written(inode)) + return truncate_partial_data_page(inode, 0, true); + punch_hole(inode, 0, F2FS_BLKSIZE); return 0; } @@ -1029,6 +1044,41 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp) return ret; } +static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct super_block *sb = sbi->sb; + __u32 in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(in, (__u32 __user *)arg)) + return -EFAULT; + + switch (in) { + case F2FS_GOING_DOWN_FULLSYNC: + sb = freeze_bdev(sb->s_bdev); + if (sb && !IS_ERR(sb)) { + f2fs_stop_checkpoint(sbi); + thaw_bdev(sb->s_bdev, sb); + } + break; + case 
F2FS_GOING_DOWN_METASYNC: + /* do checkpoint only */ + f2fs_sync_fs(sb, 1); + f2fs_stop_checkpoint(sbi); + break; + case F2FS_GOING_DOWN_NOSYNC: + f2fs_stop_checkpoint(sbi); + break; + default: + return -EINVAL; + } + return 0; +} + static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -1078,6 +1128,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_release_volatile_write(filp); case F2FS_IOC_ABORT_VOLATILE_WRITE: return f2fs_ioc_abort_volatile_write(filp); + case F2FS_IOC_SHUTDOWN: + return f2fs_ioc_shutdown(filp, arg); case FITRIM: return f2fs_ioc_fitrim(filp, arg); default: @@ -1104,8 +1156,6 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) const struct file_operations f2fs_file_operations = { .llseek = f2fs_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .open = generic_file_open, diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 76adbc3..ed58211 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -435,7 +435,7 @@ next_step: set_page_dirty(node_page); } f2fs_put_page(node_page, 1); - stat_inc_node_blk_count(sbi, 1); + stat_inc_node_blk_count(sbi, 1, gc_type); } if (initial) { @@ -622,7 +622,7 @@ next_step: if (IS_ERR(data_page)) continue; move_data_page(inode, data_page, gc_type); - stat_inc_data_blk_count(sbi, 1); + stat_inc_data_blk_count(sbi, 1, gc_type); } } @@ -680,7 +680,7 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, } blk_finish_plug(&plug); - stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer))); + stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type); stat_inc_call_count(sbi->stat_info); f2fs_put_page(sum_page, 1); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 1484c00..8140e4f 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -21,7 +21,7 @@ bool f2fs_may_inline(struct inode *inode) if (f2fs_is_atomic_file(inode)) return false; - if (!S_ISREG(inode->i_mode)) + if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode)) return false; if (i_size_read(inode) > MAX_INLINE_DATA) @@ -50,10 +50,19 @@ void read_inline_data(struct page *page, struct page *ipage) SetPageUptodate(page); } -static void truncate_inline_data(struct page *ipage) +bool truncate_inline_inode(struct page *ipage, u64 from) { + void *addr; + + if (from >= MAX_INLINE_DATA) + return false; + + addr = inline_data_addr(ipage); + f2fs_wait_on_page_writeback(ipage, NODE); - memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA); + memset(addr + from, 0, MAX_INLINE_DATA - from); + + return true; } int f2fs_read_inline_data(struct inode *inode, struct page *page) @@ -122,7 +131,8 @@ no_update: set_page_writeback(page); fio.blk_addr = dn->data_blkaddr; write_data_page(page, dn, &fio); - update_extent_cache(dn); + set_data_blkaddr(dn); + f2fs_update_extent_cache(dn); f2fs_wait_on_page_writeback(page, DATA); if (dirty) inode_dec_dirty_pages(dn->inode); @@ -131,7 +141,7 @@ no_update: set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE); /* clear inline data and flag after data writeback */ - truncate_inline_data(dn->inode_page); + truncate_inline_inode(dn->inode_page, 0); clear_out: stat_dec_inline_inode(dn->inode); f2fs_clear_inline_inode(dn->inode); @@ -245,7 +255,7 @@ process_inline: if (f2fs_has_inline_data(inode)) { ipage = get_node_page(sbi, inode->i_ino); f2fs_bug_on(sbi, IS_ERR(ipage)); - truncate_inline_data(ipage); + truncate_inline_inode(ipage, 0); 
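The F2FS_IOC_SHUTDOWN handler added to file.c above reads a __u32 going-down mode from userspace and requires CAP_SYS_ADMIN. A minimal userspace sketch follows; the file path is a placeholder, and the constants simply mirror the f2fs.h definitions above.

/*
 * Hedged sketch: request a metadata-sync shutdown of an f2fs volume.
 * "/mnt/f2fs/somefile" is a hypothetical path on the target filesystem;
 * run as root because the handler checks CAP_SYS_ADMIN.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define F2FS_IOC_SHUTDOWN		_IOR('X', 125, __u32)
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */

int main(void)
{
	__u32 mode = F2FS_GOING_DOWN_METASYNC;
	int fd = open("/mnt/f2fs/somefile", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, F2FS_IOC_SHUTDOWN, &mode) < 0)
		perror("F2FS_IOC_SHUTDOWN");
	close(fd);
	return 0;
}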
f2fs_clear_inline_inode(inode); update_inode(inode, ipage); f2fs_put_page(ipage, 1); @@ -363,7 +373,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, set_page_dirty(page); /* clear inline dir and flag after data writeback */ - truncate_inline_data(ipage); + truncate_inline_inode(ipage, 0); stat_dec_inline_dir(dir); clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); @@ -380,21 +390,18 @@ out: } int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, - struct inode *inode) + struct inode *inode, nid_t ino, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct page *ipage; unsigned int bit_pos; f2fs_hash_t name_hash; - struct f2fs_dir_entry *de; size_t namelen = name->len; struct f2fs_inline_dentry *dentry_blk = NULL; + struct f2fs_dentry_ptr d; int slots = GET_DENTRY_SLOTS(namelen); - struct page *page; + struct page *page = NULL; int err = 0; - int i; - - name_hash = f2fs_dentry_hash(name); ipage = get_node_page(sbi, dir->i_ino); if (IS_ERR(ipage)) @@ -410,32 +417,34 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, goto out; } - down_write(&F2FS_I(inode)->i_sem); - page = init_inode_metadata(inode, dir, name, ipage); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto fail; + if (inode) { + down_write(&F2FS_I(inode)->i_sem); + page = init_inode_metadata(inode, dir, name, ipage); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto fail; + } } f2fs_wait_on_page_writeback(ipage, NODE); - de = &dentry_blk->dentry[bit_pos]; - de->hash_code = name_hash; - de->name_len = cpu_to_le16(namelen); - memcpy(dentry_blk->filename[bit_pos], name->name, name->len); - de->ino = cpu_to_le32(inode->i_ino); - set_de_type(de, inode); - for (i = 0; i < slots; i++) - test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); + + name_hash = f2fs_dentry_hash(name); + make_dentry_ptr(&d, (void *)dentry_blk, 2); + f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos); + set_page_dirty(ipage); /* we don't need to mark_inode_dirty now */ - F2FS_I(inode)->i_pino = dir->i_ino; - update_inode(inode, page); - f2fs_put_page(page, 1); + if (inode) { + F2FS_I(inode)->i_pino = dir->i_ino; + update_inode(inode, page); + f2fs_put_page(page, 1); + } update_parent_metadata(dir, inode, 0); fail: - up_write(&F2FS_I(inode)->i_sem); + if (inode) + up_write(&F2FS_I(inode)->i_sem); if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) { update_inode(dir, ipage); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 2d002e3..e622ec9 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -51,6 +51,15 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) } } +static bool __written_first_block(struct f2fs_inode *ri) +{ + block_t addr = le32_to_cpu(ri->i_addr[0]); + + if (addr != NEW_ADDR && addr != NULL_ADDR) + return true; + return false; +} + static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri) { if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { @@ -130,7 +139,8 @@ static int do_read_inode(struct inode *inode) fi->i_pino = le32_to_cpu(ri->i_pino); fi->i_dir_level = ri->i_dir_level; - get_extent_info(&fi->ext, ri->i_ext); + f2fs_init_extent_cache(inode, &ri->i_ext); + get_inline_info(fi, ri); /* check data exist */ @@ -140,6 +150,9 @@ static int do_read_inode(struct inode *inode) /* get rdev by using inline_info */ __get_inode_rdev(inode, ri); + if (__written_first_block(ri)) + set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); + f2fs_put_page(node_page, 1); stat_inc_inline_inode(inode); @@ -220,7 +233,11 @@ 
void update_inode(struct inode *inode, struct page *node_page) ri->i_links = cpu_to_le32(inode->i_nlink); ri->i_size = cpu_to_le64(i_size_read(inode)); ri->i_blocks = cpu_to_le64(inode->i_blocks); + + read_lock(&F2FS_I(inode)->ext_lock); set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext); + read_unlock(&F2FS_I(inode)->ext_lock); + set_raw_inline(F2FS_I(inode), ri); ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec); @@ -328,6 +345,12 @@ void f2fs_evict_inode(struct inode *inode) no_delete: stat_dec_inline_dir(inode); stat_dec_inline_inode(inode); + + /* update extent info in inode */ + if (inode->i_nlink) + f2fs_preserve_extent_tree(inode); + f2fs_destroy_extent_tree(inode); + invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino); if (xnid) invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index e79639a9..7e3794e 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/ctype.h> #include <linux/dcache.h> +#include <linux/namei.h> #include "f2fs.h" #include "node.h" @@ -150,7 +151,7 @@ out: static int f2fs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct f2fs_sb_info *sbi = F2FS_I_SB(dir); int err; @@ -181,10 +182,48 @@ out: struct dentry *f2fs_get_parent(struct dentry *child) { struct qstr dotdot = QSTR_INIT("..", 2); - unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot); + unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot); if (!ino) return ERR_PTR(-ENOENT); - return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino)); + return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino)); +} + +static int __recover_dot_dentries(struct inode *dir, nid_t pino) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(dir); + struct qstr dot = QSTR_INIT(".", 1); + struct qstr dotdot = QSTR_INIT("..", 2); + struct f2fs_dir_entry *de; + struct page *page; + int err = 0; + + f2fs_lock_op(sbi); + + de = f2fs_find_entry(dir, &dot, &page); + if (de) { + f2fs_dentry_kunmap(dir, page); + f2fs_put_page(page, 0); + } else { + err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR); + if (err) + goto out; + } + + de = f2fs_find_entry(dir, &dotdot, &page); + if (de) { + f2fs_dentry_kunmap(dir, page); + f2fs_put_page(page, 0); + } else { + err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR); + } +out: + if (!err) { + clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS); + mark_inode_dirty(dir); + } + + f2fs_unlock_op(sbi); + return err; } static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, @@ -206,6 +245,16 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, inode = f2fs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); + + if (f2fs_has_inline_dots(inode)) { + int err; + + err = __recover_dot_dentries(inode, dir->i_ino); + if (err) { + iget_failed(inode); + return ERR_PTR(err); + } + } } return d_splice_alias(inode, dentry); @@ -214,7 +263,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, static int f2fs_unlink(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct f2fs_dir_entry *de; struct page *page; int err = -ENOENT; @@ -247,6 +296,23 @@ fail: return err; } +static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + struct 
page *page; + + page = page_follow_link_light(dentry, nd); + if (IS_ERR(page)) + return page; + + /* this is broken symlink case */ + if (*nd_get_link(nd) == 0) { + kunmap(page); + page_cache_release(page); + return ERR_PTR(-ENOENT); + } + return page; +} + static int f2fs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { @@ -276,6 +342,17 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, d_instantiate(dentry, inode); unlock_new_inode(inode); + /* + * Let's flush symlink data in order to avoid broken symlink as much as + * possible. Nevertheless, fsyncing is the best way, but there is no + * way to get a file descriptor in order to flush that. + * + * Note that, it needs to do dir->fsync to make this recoverable. + * If the symlink path is stored into inline_data, there is no + * performance regression. + */ + filemap_write_and_wait_range(inode->i_mapping, 0, symlen - 1); + if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return err; @@ -326,7 +403,7 @@ out_fail: static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (f2fs_empty_dir(inode)) return f2fs_unlink(dir, dentry); return -ENOTEMPTY; @@ -374,8 +451,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct page *old_dir_page; struct page *old_page, *new_page; struct f2fs_dir_entry *old_dir_entry = NULL; @@ -501,8 +578,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct page *old_dir_page, *new_dir_page; struct page *old_page, *new_page; struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL; @@ -693,6 +770,8 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); + + stat_inc_inline_inode(inode); d_tmpfile(dentry, inode); unlock_new_inode(inode); return 0; @@ -729,7 +808,7 @@ const struct inode_operations f2fs_dir_inode_operations = { const struct inode_operations f2fs_symlink_inode_operations = { .readlink = generic_readlink, - .follow_link = page_follow_link_light, + .follow_link = f2fs_follow_link, .put_link = page_put_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 97bd9d3..8ab0cf1 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -41,7 +41,9 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) /* only uses low memory */ avail_ram = val.totalram - val.totalhigh; - /* give 25%, 25%, 50%, 50% memory for each components respectively */ + /* + * give 25%, 25%, 50%, 50%, 50% memory for each components respectively + */ if (type == FREE_NIDS) { mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> PAGE_CACHE_SHIFT; @@ -62,6 +64,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) mem_size += (sbi->im[i].ino_num * sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT; res = mem_size < 
((avail_ram * nm_i->ram_thresh / 100) >> 1); + } else if (type == EXTENT_CACHE) { + mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) + + atomic_read(&sbi->total_ext_node) * + sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT; + res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else { if (sbi->sb->s_bdi->dirty_exceeded) return false; @@ -494,7 +501,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) /* if inline_data is set, should not report any block indices */ if (f2fs_has_inline_data(dn->inode) && index) { - err = -EINVAL; + err = -ENOENT; f2fs_put_page(npage[0], 1); goto release_out; } @@ -995,6 +1002,7 @@ static int read_node_page(struct page *page, int rw) get_node_info(sbi, page->index, &ni); if (unlikely(ni.blk_addr == NULL_ADDR)) { + ClearPageUptodate(page); f2fs_put_page(page, 1); return -ENOENT; } @@ -1306,6 +1314,7 @@ static int f2fs_write_node_page(struct page *page, /* This page is already truncated */ if (unlikely(ni.blk_addr == NULL_ADDR)) { + ClearPageUptodate(page); dec_page_count(sbi, F2FS_DIRTY_NODES); unlock_page(page); return 0; @@ -1821,6 +1830,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, struct f2fs_nat_block *nat_blk; struct nat_entry *ne, *cur; struct page *page = NULL; + struct f2fs_nm_info *nm_i = NM_I(sbi); /* * there are two steps to flush nat entries: @@ -1874,7 +1884,9 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, f2fs_bug_on(sbi, set->entry_cnt); + down_write(&nm_i->nat_tree_lock); radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); + up_write(&nm_i->nat_tree_lock); kmem_cache_free(nat_entry_set_slab, set); } @@ -1902,6 +1914,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL)) remove_nats_in_journal(sbi); + down_write(&nm_i->nat_tree_lock); while ((found = __gang_lookup_nat_set(nm_i, set_idx, SETVEC_SIZE, setvec))) { unsigned idx; @@ -1910,6 +1923,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) __adjust_nat_entry_set(setvec[idx], &sets, MAX_NAT_JENTRIES(sum)); } + up_write(&nm_i->nat_tree_lock); /* flush dirty nats in nat entry set */ list_for_each_entry_safe(set, tmp, &sets, set_list) diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index f405bbf..c56026f 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -120,6 +120,7 @@ enum mem_type { NAT_ENTRIES, /* indicates the cached nat entry */ DIRTY_DENTS, /* indicates dirty dentry pages */ INO_ENTRIES, /* indicates inode entries */ + EXTENT_CACHE, /* indicates extent cache */ BASE_CHECK, /* check kernel status */ }; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 41afb95..8d8ea99 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -93,10 +93,9 @@ static int recover_dentry(struct inode *inode, struct page *ipage) } retry: de = f2fs_find_entry(dir, &name, &page); - if (de && inode->i_ino == le32_to_cpu(de->ino)) { - clear_inode_flag(F2FS_I(inode), FI_INC_LINK); + if (de && inode->i_ino == le32_to_cpu(de->ino)) goto out_unmap_put; - } + if (de) { einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino)); if (IS_ERR(einode)) { @@ -115,7 +114,7 @@ retry: iput(einode); goto retry; } - err = __f2fs_add_link(dir, &name, inode); + err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode); if (err) goto out_err; @@ -187,11 +186,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) goto next; entry = get_fsync_inode(head, ino_of_node(page)); - if (entry) { - if (IS_INODE(page) && is_dent_dnode(page)) - 
set_inode_flag(F2FS_I(entry->inode), - FI_INC_LINK); - } else { + if (!entry) { if (IS_INODE(page) && is_dent_dnode(page)) { err = recover_inode_page(sbi, page); if (err) @@ -212,8 +207,10 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) if (IS_ERR(entry->inode)) { err = PTR_ERR(entry->inode); kmem_cache_free(fsync_entry_slab, entry); - if (err == -ENOENT) + if (err == -ENOENT) { + err = 0; goto next; + } break; } list_add_tail(&entry->list, head); @@ -256,6 +253,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, struct f2fs_summary_block *sum_node; struct f2fs_summary sum; struct page *sum_page, *node_page; + struct dnode_of_data tdn = *dn; nid_t ino, nid; struct inode *inode; unsigned int offset; @@ -283,17 +281,15 @@ got_it: /* Use the locked dnode page and inode */ nid = le32_to_cpu(sum.nid); if (dn->inode->i_ino == nid) { - struct dnode_of_data tdn = *dn; tdn.nid = nid; + if (!dn->inode_page_locked) + lock_page(dn->inode_page); tdn.node_page = dn->inode_page; tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); - truncate_data_blocks_range(&tdn, 1); - return 0; + goto truncate_out; } else if (dn->nid == nid) { - struct dnode_of_data tdn = *dn; tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); - truncate_data_blocks_range(&tdn, 1); - return 0; + goto truncate_out; } /* Get the node page */ @@ -317,18 +313,33 @@ got_it: bidx = start_bidx_of_node(offset, F2FS_I(inode)) + le16_to_cpu(sum.ofs_in_node); - if (ino != dn->inode->i_ino) { - truncate_hole(inode, bidx, bidx + 1); + /* + * if inode page is locked, unlock temporarily, but its reference + * count keeps alive. + */ + if (ino == dn->inode->i_ino && dn->inode_page_locked) + unlock_page(dn->inode_page); + + set_new_dnode(&tdn, inode, NULL, NULL, 0); + if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE)) + goto out; + + if (tdn.data_blkaddr == blkaddr) + truncate_data_blocks_range(&tdn, 1); + + f2fs_put_dnode(&tdn); +out: + if (ino != dn->inode->i_ino) iput(inode); - } else { - struct dnode_of_data tdn; - set_new_dnode(&tdn, inode, dn->inode_page, NULL, 0); - if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE)) - return 0; - if (tdn.data_blkaddr != NULL_ADDR) - truncate_data_blocks_range(&tdn, 1); - f2fs_put_page(tdn.node_page, 1); - } + else if (dn->inode_page_locked) + lock_page(dn->inode_page); + return 0; + +truncate_out: + if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr) + truncate_data_blocks_range(&tdn, 1); + if (dn->inode->i_ino == nid && !dn->inode_page_locked) + unlock_page(dn->inode_page); return 0; } @@ -384,7 +395,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, src = datablock_addr(dn.node_page, dn.ofs_in_node); dest = datablock_addr(page, dn.ofs_in_node); - if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) { + if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR && + dest >= MAIN_BLKADDR(sbi) && dest < MAX_BLKADDR(sbi)) { + if (src == NULL_ADDR) { err = reserve_new_block(&dn); /* We should not get -ENOSPC */ @@ -401,14 +414,13 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, /* write dummy data page */ recover_data_page(sbi, NULL, &sum, src, dest); dn.data_blkaddr = dest; - update_extent_cache(&dn); + set_data_blkaddr(&dn); + f2fs_update_extent_cache(&dn); recovered++; } dn.ofs_in_node++; } - /* write node page in place */ - set_summary(&sum, dn.nid, 0, 0); if (IS_INODE(dn.node_page)) sync_inode_page(&dn); @@ -552,7 +564,7 @@ out: mutex_unlock(&sbi->cp_mutex); } else if (need_writecp) { struct cp_control 
cpc = { - .reason = CP_SYNC, + .reason = CP_RECOVERY, }; mutex_unlock(&sbi->cp_mutex); write_checkpoint(sbi, &cpc); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index daee4ab..f939660 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -205,6 +205,8 @@ retry: list_add_tail(&new->list, &fi->inmem_pages); inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); mutex_unlock(&fi->inmem_lock); + + trace_f2fs_register_inmem_page(page, INMEM); } void commit_inmem_pages(struct inode *inode, bool abort) @@ -238,11 +240,13 @@ void commit_inmem_pages(struct inode *inode, bool abort) f2fs_wait_on_page_writeback(cur->page, DATA); if (clear_page_dirty_for_io(cur->page)) inode_dec_dirty_pages(inode); + trace_f2fs_commit_inmem_page(cur->page, INMEM); do_write_data_page(cur->page, &fio); submit_bio = true; } f2fs_put_page(cur->page, 1); } else { + trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP); put_page(cur->page); } radix_tree_delete(&fi->inmem_root, cur->page->index); @@ -277,6 +281,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi) void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) { + /* try to shrink extent cache when there is no enough memory */ + f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); + /* check the # of cached NAT entries and prefree segments */ if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) || excess_prefree_segs(sbi) || @@ -549,7 +556,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); - if (end - start < cpc->trim_minlen) + if (force && end - start < cpc->trim_minlen) continue; __add_discard_entry(sbi, cpc, start, end); @@ -1164,6 +1171,7 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); + mutex_lock(&sit_i->sentry_lock); /* direct_io'ed data is aligned to the segment for better performance */ if (direct_io && curseg->next_blkoff) @@ -1178,7 +1186,6 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, */ __add_sum_entry(sbi, type, sum); - mutex_lock(&sit_i->sentry_lock); __refresh_next_blkoff(sbi, curseg); stat_inc_block_count(sbi, curseg); @@ -1730,6 +1737,9 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) mutex_lock(&curseg->curseg_mutex); mutex_lock(&sit_i->sentry_lock); + if (!sit_i->dirty_sentries) + goto out; + /* * add and account sit entries of dirty bitmap in sit entry * set temporarily @@ -1744,9 +1754,6 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL)) remove_sits_in_journal(sbi); - if (!sit_i->dirty_sentries) - goto out; - /* * there are two steps to flush sit entries: * #1, flush sit entries to journal in current cold data summary block. 
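Stepping back to the extent cache introduced in f2fs.h above: an extent_node can absorb a neighbouring mapping only when __is_extent_mergeable() finds both the file offsets and the block addresses contiguous. Below is a self-contained userspace sketch, with the struct and predicate copied from that header and made-up numbers, walking through one back-merge.

/*
 * Hedged illustration only: the struct layout and the predicate mirror
 * the f2fs.h additions above; the extent values are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct extent_info {
	unsigned int fofs;	/* start offset in a file */
	unsigned int blk;	/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

static bool __is_extent_mergeable(struct extent_info *back,
				  struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
		back->blk + back->len == front->blk);
}

int main(void)
{
	struct extent_info cached = { .fofs = 100, .blk = 2000, .len = 8 };
	struct extent_info update = { .fofs = 108, .blk = 2008, .len = 4 };

	if (__is_extent_mergeable(&cached, &update)) {
		cached.len += update.len;
		printf("merged: fofs=%u blk=%u len=%u\n",
		       cached.fofs, cached.blk, cached.len);
	}
	return 0;
}

The update at file offset 108 mapping to block 2008 extends the cached extent to length 12 instead of adding a second extent_node, which is what keeps the per-inode rb-tree and the global extent_list short.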
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 7fd3511..85d7fa7 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -336,7 +336,8 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) clear_bit(segno, free_i->free_segmap); free_i->free_segments++; - next = find_next_bit(free_i->free_segmap, MAIN_SEGS(sbi), start_segno); + next = find_next_bit(free_i->free_segmap, + start_segno + sbi->segs_per_sec, start_segno); if (next >= start_segno + sbi->segs_per_sec) { clear_bit(secno, free_i->free_secmap); free_i->free_sections++; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index f2fe666..160b883 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -57,6 +57,8 @@ enum { Opt_flush_merge, Opt_nobarrier, Opt_fastboot, + Opt_extent_cache, + Opt_noinline_data, Opt_err, }; @@ -78,6 +80,8 @@ static match_table_t f2fs_tokens = { {Opt_flush_merge, "flush_merge"}, {Opt_nobarrier, "nobarrier"}, {Opt_fastboot, "fastboot"}, + {Opt_extent_cache, "extent_cache"}, + {Opt_noinline_data, "noinline_data"}, {Opt_err, NULL}, }; @@ -367,6 +371,12 @@ static int parse_options(struct super_block *sb, char *options) case Opt_fastboot: set_opt(sbi, FASTBOOT); break; + case Opt_extent_cache: + set_opt(sbi, EXTENT_CACHE); + break; + case Opt_noinline_data: + clear_opt(sbi, INLINE_DATA); + break; default: f2fs_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" or missing value", @@ -392,7 +402,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) atomic_set(&fi->dirty_pages, 0); fi->i_current_depth = 1; fi->i_advise = 0; - rwlock_init(&fi->ext.ext_lock); + rwlock_init(&fi->ext_lock); init_rwsem(&fi->i_sem); INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS); INIT_LIST_HEAD(&fi->inmem_pages); @@ -591,6 +601,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",disable_ext_identify"); if (test_opt(sbi, INLINE_DATA)) seq_puts(seq, ",inline_data"); + else + seq_puts(seq, ",noinline_data"); if (test_opt(sbi, INLINE_DENTRY)) seq_puts(seq, ",inline_dentry"); if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE)) @@ -599,6 +611,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",nobarrier"); if (test_opt(sbi, FASTBOOT)) seq_puts(seq, ",fastboot"); + if (test_opt(sbi, EXTENT_CACHE)) + seq_puts(seq, ",extent_cache"); seq_printf(seq, ",active_logs=%u", sbi->active_logs); return 0; @@ -959,7 +973,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) struct buffer_head *raw_super_buf; struct inode *root; long err = -EINVAL; - bool retry = true; + bool retry = true, need_fsck = false; char *options = NULL; int i; @@ -984,6 +998,7 @@ try_onemore: sbi->active_logs = NR_CURSEG_TYPE; set_opt(sbi, BG_GC); + set_opt(sbi, INLINE_DATA); #ifdef CONFIG_F2FS_FS_XATTR set_opt(sbi, XATTR_USER); @@ -1020,7 +1035,6 @@ try_onemore: sbi->raw_super = raw_super; sbi->raw_super_buf = raw_super_buf; mutex_init(&sbi->gc_mutex); - mutex_init(&sbi->writepages); mutex_init(&sbi->cp_mutex); init_rwsem(&sbi->node_write); clear_sbi_flag(sbi, SBI_POR_DOING); @@ -1072,6 +1086,8 @@ try_onemore: INIT_LIST_HEAD(&sbi->dir_inode_list); spin_lock_init(&sbi->dir_inode_lock); + init_extent_cache_info(sbi); + init_ino_entry_info(sbi); /* setup f2fs internal modules */ @@ -1146,9 +1162,6 @@ try_onemore: if (err) goto free_proc; - if (!retry) - set_sbi_flag(sbi, SBI_NEED_FSCK); - /* recover fsynced data */ if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { /* @@ -1160,8 +1173,13 @@ try_onemore: err = -EROFS; goto free_kobj; } + + if 
(need_fsck) + set_sbi_flag(sbi, SBI_NEED_FSCK); + err = recover_fsync_data(sbi); if (err) { + need_fsck = true; f2fs_msg(sb, KERN_ERR, "Cannot recover all fsync data errno=%ld", err); goto free_kobj; @@ -1212,7 +1230,7 @@ free_sbi: /* give only one another chance */ if (retry) { - retry = 0; + retry = false; shrink_dcache_sb(sb); goto try_onemore; } @@ -1278,10 +1296,13 @@ static int __init init_f2fs_fs(void) err = create_checkpoint_caches(); if (err) goto free_segment_manager_caches; + err = create_extent_cache(); + if (err) + goto free_checkpoint_caches; f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj); if (!f2fs_kset) { err = -ENOMEM; - goto free_checkpoint_caches; + goto free_extent_cache; } err = register_filesystem(&f2fs_fs_type); if (err) @@ -1292,6 +1313,8 @@ static int __init init_f2fs_fs(void) free_kset: kset_unregister(f2fs_kset); +free_extent_cache: + destroy_extent_cache(); free_checkpoint_caches: destroy_checkpoint_caches(); free_segment_manager_caches: @@ -1309,6 +1332,7 @@ static void __exit exit_f2fs_fs(void) remove_proc_entry("fs/f2fs", NULL); f2fs_destroy_root_stats(); unregister_filesystem(&f2fs_fs_type); + destroy_extent_cache(); destroy_checkpoint_caches(); destroy_segment_manager_caches(); destroy_node_manager_caches(); diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 5072bf9..9757f65 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -83,7 +83,7 @@ static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name, } if (strcmp(name, "") == 0) return -EINVAL; - return f2fs_getxattr(dentry->d_inode, type, name, buffer, size, NULL); + return f2fs_getxattr(d_inode(dentry), type, name, buffer, size, NULL); } static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name, @@ -108,7 +108,7 @@ static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name, if (strcmp(name, "") == 0) return -EINVAL; - return f2fs_setxattr(dentry->d_inode, type, name, + return f2fs_setxattr(d_inode(dentry), type, name, value, size, NULL, flags); } @@ -130,19 +130,20 @@ static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list, static int f2fs_xattr_advise_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (strcmp(name, "") != 0) return -EINVAL; - *((char *)buffer) = F2FS_I(inode)->i_advise; + if (buffer) + *((char *)buffer) = F2FS_I(inode)->i_advise; return sizeof(char); } static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (strcmp(name, "") != 0) return -EINVAL; @@ -152,6 +153,7 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name, return -EINVAL; F2FS_I(inode)->i_advise |= *(char *)value; + mark_inode_dirty(inode); return 0; } @@ -442,7 +444,7 @@ cleanup: ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct f2fs_xattr_entry *entry; void *base_addr; int error = 0; diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 91ad9e1..93fc622 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -8,9 +8,7 @@ * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers. */ -#include <linux/fs.h> #include <linux/slab.h> -#include <linux/buffer_head.h> #include "fat.h" /* this must be > 0. 
*/ diff --git a/fs/fat/dir.c b/fs/fat/dir.c index c5d6bb9..4afc4d9 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -13,13 +13,9 @@ * Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de> */ -#include <linux/module.h> #include <linux/slab.h> -#include <linux/time.h> -#include <linux/buffer_head.h> #include <linux/compat.h> #include <linux/uaccess.h> -#include <linux/kernel.h> #include "fat.h" /* diff --git a/fs/fat/fat.h b/fs/fat/fat.h index 64e295e..be5e153 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -2,11 +2,8 @@ #define _FAT_H #include <linux/buffer_head.h> -#include <linux/string.h> #include <linux/nls.h> -#include <linux/fs.h> #include <linux/hash.h> -#include <linux/mutex.h> #include <linux/ratelimit.h> #include <linux/msdos_fs.h> @@ -66,7 +63,7 @@ struct msdos_sb_info { unsigned short sec_per_clus; /* sectors/cluster */ unsigned short cluster_bits; /* log2(cluster_size) */ unsigned int cluster_size; /* cluster size */ - unsigned char fats, fat_bits; /* number of FATs, FAT bits (12 or 16) */ + unsigned char fats, fat_bits; /* number of FATs, FAT bits (12,16 or 32) */ unsigned short fat_start; unsigned long fat_length; /* FAT start & length (sec.) */ unsigned long dir_start; diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index 260705c..8226557 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -3,9 +3,6 @@ * Released under GPL v2. */ -#include <linux/module.h> -#include <linux/fs.h> -#include <linux/msdos_fs.h> #include <linux/blkdev.h> #include "fat.h" diff --git a/fs/fat/file.c b/fs/fat/file.c index 8429c68..442d50a 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -10,10 +10,6 @@ #include <linux/module.h> #include <linux/compat.h> #include <linux/mount.h> -#include <linux/time.h> -#include <linux/buffer_head.h> -#include <linux/writeback.h> -#include <linux/backing-dev.h> #include <linux/blkdev.h> #include <linux/fsnotify.h> #include <linux/security.h> @@ -170,8 +166,6 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) const struct file_operations fat_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, @@ -311,7 +305,7 @@ void fat_truncate_blocks(struct inode *inode, loff_t offset) int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); generic_fillattr(inode, stat); stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size; @@ -383,7 +377,7 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode) int fat_setattr(struct dentry *dentry, struct iattr *attr) { struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); unsigned int ia_valid; int error; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 497c7c5..c067746 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -11,21 +11,12 @@ */ #include <linux/module.h> -#include <linux/init.h> -#include <linux/time.h> -#include <linux/slab.h> -#include <linux/seq_file.h> #include <linux/pagemap.h> #include <linux/mpage.h> -#include <linux/buffer_head.h> -#include <linux/mount.h> -#include <linux/aio.h> #include <linux/vfs.h> +#include <linux/seq_file.h> #include <linux/parser.h> #include <linux/uio.h> -#include <linux/writeback.h> -#include <linux/log2.h> -#include <linux/hash.h> #include <linux/blkdev.h> #include <asm/unaligned.h> #include 
"fat.h" @@ -246,8 +237,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping, return err; } -static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, +static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; @@ -256,7 +246,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_iter_count(iter); ssize_t ret; - if (rw == WRITE) { + if (iov_iter_rw(iter) == WRITE) { /* * FIXME: blockdev_direct_IO() doesn't use ->write_begin(), * so we need to update the ->mmu_private to block boundary. @@ -275,8 +265,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, * FAT need to use the DIO_LOCKING for avoiding the race * condition of fat_get_block() and ->truncate(). */ - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block); - if (ret < 0 && (rw & WRITE)) + ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block); + if (ret < 0 && iov_iter_rw(iter) == WRITE) fat_write_failed(mapping, offset + count); return ret; @@ -1280,8 +1270,7 @@ out: static int fat_read_root(struct inode *inode) { - struct super_block *sb = inode->i_sb; - struct msdos_sb_info *sbi = MSDOS_SB(sb); + struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); int error; MSDOS_I(inode)->i_pos = MSDOS_ROOT_INO; diff --git a/fs/fat/misc.c b/fs/fat/misc.c index d8da2d2..c4589e9 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c @@ -6,10 +6,6 @@ * and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru) */ -#include <linux/module.h> -#include <linux/fs.h> -#include <linux/buffer_head.h> -#include <linux/time.h> #include "fat.h" /* diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index a783b0e..b7e2b33 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -7,8 +7,6 @@ */ #include <linux/module.h> -#include <linux/time.h> -#include <linux/buffer_head.h> #include "fat.h" /* Characters that are undesirable in an MS-DOS file name */ @@ -310,7 +308,7 @@ out: static int msdos_rmdir(struct inode *dir, struct dentry *dentry) { struct super_block *sb = dir->i_sb; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct fat_slot_info sinfo; int err; @@ -404,7 +402,7 @@ out: /***** Unlink a file */ static int msdos_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct fat_slot_info sinfo; int err; @@ -442,8 +440,8 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name, int err, old_attrs, is_dir, update_dotdot, corrupt = 0; old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; - old_inode = old_dentry->d_inode; - new_inode = new_dentry->d_inode; + old_inode = d_inode(old_dentry); + new_inode = d_inode(new_dentry); err = fat_scan(old_dir, old_name, &old_sinfo); if (err) { diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index b8b92c2..7092584 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -16,10 +16,8 @@ */ #include <linux/module.h> -#include <linux/jiffies.h> #include <linux/ctype.h> #include <linux/slab.h> -#include <linux/buffer_head.h> #include <linux/namei.h> #include "fat.h" @@ -35,7 +33,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry) { int ret = 1; spin_lock(&dentry->d_lock); - if (dentry->d_time != dentry->d_parent->d_inode->i_version) + if (dentry->d_time != d_inode(dentry->d_parent)->i_version) ret = 0; spin_unlock(&dentry->d_lock); return ret; @@ 
-47,7 +45,7 @@ static int vfat_revalidate(struct dentry *dentry, unsigned int flags) return -ECHILD; /* This is not negative dentry. Always valid. */ - if (dentry->d_inode) + if (d_really_is_positive(dentry)) return 1; return vfat_revalidate_shortname(dentry); } @@ -67,7 +65,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags) * positive dentry isn't good idea. So it's unsupported like * rename("filename", "FILENAME") for now. */ - if (dentry->d_inode) + if (d_really_is_positive(dentry)) return 1; /* @@ -803,7 +801,7 @@ out: static int vfat_rmdir(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; int err; @@ -834,7 +832,7 @@ out: static int vfat_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; int err; @@ -917,8 +915,8 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry, struct super_block *sb = old_dir->i_sb; old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; - old_inode = old_dentry->d_inode; - new_inode = new_dentry->d_inode; + old_inode = d_inode(old_dentry); + new_inode = d_inode(new_dentry); mutex_lock(&MSDOS_SB(sb)->s_lock); err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo); if (err) diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c index 93e1493..eb19265 100644 --- a/fs/fat/nfs.c +++ b/fs/fat/nfs.c @@ -266,7 +266,7 @@ struct inode *fat_rebuild_parent(struct super_block *sb, int parent_logstart) * Find the parent for a directory that is not currently connected to * the filesystem root. * - * On entry, the caller holds child_dir->d_inode->i_mutex. + * On entry, the caller holds d_inode(child_dir)->i_mutex. 
*/ static struct dentry *fat_get_parent(struct dentry *child_dir) { @@ -276,7 +276,7 @@ static struct dentry *fat_get_parent(struct dentry *child_dir) struct inode *parent_inode = NULL; struct msdos_sb_info *sbi = MSDOS_SB(sb); - if (!fat_get_dotdot_entry(child_dir->d_inode, &bh, &de)) { + if (!fat_get_dotdot_entry(d_inode(child_dir), &bh, &de)) { int parent_logstart = fat_get_start(sbi, de); parent_inode = fat_dget(sb, parent_logstart); if (!parent_inode && sbi->options.nfs == FAT_NFS_NOSTALE_RO) @@ -638,8 +638,7 @@ static struct file *__fget(unsigned int fd, fmode_t mask) file = fcheck_files(files, fd); if (file) { /* File object ref couldn't be taken */ - if ((file->f_mode & mask) || - !atomic_long_inc_not_zero(&file->f_count)) + if ((file->f_mode & mask) || !get_file_rcu(file)) file = NULL; } rcu_read_unlock(); diff --git a/fs/file_table.c b/fs/file_table.c index 3f85411..294174d 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -168,10 +168,10 @@ struct file *alloc_file(struct path *path, fmode_t mode, file->f_inode = path->dentry->d_inode; file->f_mapping = path->dentry->d_inode->i_mapping; if ((mode & FMODE_READ) && - likely(fop->read || fop->aio_read || fop->read_iter)) + likely(fop->read || fop->read_iter)) mode |= FMODE_CAN_READ; if ((mode & FMODE_WRITE) && - likely(fop->write || fop->aio_write || fop->write_iter)) + likely(fop->write || fop->write_iter)) mode |= FMODE_CAN_WRITE; file->f_mode = mode; file->f_op = fop; diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c index c36aeaf..8b9229e 100644 --- a/fs/freevxfs/vxfs_immed.c +++ b/fs/freevxfs/vxfs_immed.c @@ -76,7 +76,7 @@ const struct address_space_operations vxfs_immed_aops = { static void * vxfs_immed_follow_link(struct dentry *dp, struct nameidata *np) { - struct vxfs_inode_info *vip = VXFS_INO(dp->d_inode); + struct vxfs_inode_info *vip = VXFS_INO(d_inode(dp)); nd_set_link(np, vip->vii_immed.vi_immed); return NULL; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index e907052..32a8bbd 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -53,6 +53,18 @@ struct wb_writeback_work { struct completion *done; /* set if the caller waits */ }; +/* + * If an inode is constantly having its pages dirtied, but then the + * updates stop dirtytime_expire_interval seconds in the past, it's + * possible for the worst case time between when an inode has its + * timestamps updated and when they finally get written out to be two + * dirtytime_expire_intervals. We set the default to 12 hours (in + * seconds), which means most of the time inodes will have their + * timestamps written to disk after 12 hours, but in the worst case a + * few inodes might not their timestamps updated for 24 hours. + */ +unsigned int dirtytime_expire_interval = 12 * 60 * 60; + /** * writeback_in_progress - determine whether there is writeback in progress * @bdi: the device's backing_dev_info structure. 
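The dirtytime_expire_interval default above bounds how stale a timestamp-only (lazytime) update can become: an inode is forced out once its dirtied_time_when is more than one interval old, and the periodic worker added below only fires once per interval, hence the 12/24 hour figures in the comment. The dirtytime_interval_handler() added below reschedules that worker as soon as the value changes. A hedged sketch of tuning it at runtime, assuming the handler is exposed as vm.dirtytime_expire_seconds by a sysctl-table change outside this fs/ diff:

/*
 * Hedged sketch: drop the lazytime expiry interval to one hour.
 * The /proc path is an assumption (the sysctl table lives outside this
 * diff); on a successful write the handler calls mod_delayed_work(..., 0)
 * so the periodic wakeup is rescheduled with the new interval right away.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/dirtytime_expire_seconds", "w");

	if (!f) {
		perror("dirtytime_expire_seconds");
		return 1;
	}
	fprintf(f, "3600\n");
	return fclose(f) ? 1 : 0;
}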
@@ -275,8 +287,8 @@ static int move_expired_inodes(struct list_head *delaying_queue, if ((flags & EXPIRE_DIRTY_ATIME) == 0) older_than_this = work->older_than_this; - else if ((work->reason == WB_REASON_SYNC) == 0) { - expire_time = jiffies - (HZ * 86400); + else if (!work->for_sync) { + expire_time = jiffies - (dirtytime_expire_interval * HZ); older_than_this = &expire_time; } while (!list_empty(delaying_queue)) { @@ -458,6 +470,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, */ redirty_tail(inode, wb); } else if (inode->i_state & I_DIRTY_TIME) { + inode->dirtied_when = jiffies; list_move(&inode->i_wb_list, &wb->b_dirty_time); } else { /* The inode is clean. Remove from writeback lists. */ @@ -505,12 +518,17 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_lock(&inode->i_lock); dirty = inode->i_state & I_DIRTY; - if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) && - (inode->i_state & I_DIRTY_TIME)) || - (inode->i_state & I_DIRTY_TIME_EXPIRED)) { - dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; - trace_writeback_lazytime(inode); - } + if (inode->i_state & I_DIRTY_TIME) { + if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || + unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || + unlikely(time_after(jiffies, + (inode->dirtied_time_when + + dirtytime_expire_interval * HZ)))) { + dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; + trace_writeback_lazytime(inode); + } + } else + inode->i_state &= ~I_DIRTY_TIME_EXPIRED; inode->i_state &= ~dirty; /* @@ -1131,6 +1149,56 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) rcu_read_unlock(); } +/* + * Wake up bdi's periodically to make sure dirtytime inodes gets + * written back periodically. We deliberately do *not* check the + * b_dirtytime list in wb_has_dirty_io(), since this would cause the + * kernel to be constantly waking up once there are any dirtytime + * inodes on the system. So instead we define a separate delayed work + * function which gets called much more rarely. (By default, only + * once every 12 hours.) + * + * If there is any other write activity going on in the file system, + * this function won't be necessary. But if the only thing that has + * happened on the file system is a dirtytime inode caused by an atime + * update, we need this infrastructure below to make sure that inode + * eventually gets pushed out to disk. 
+ */ +static void wakeup_dirtytime_writeback(struct work_struct *w); +static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback); + +static void wakeup_dirtytime_writeback(struct work_struct *w) +{ + struct backing_dev_info *bdi; + + rcu_read_lock(); + list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { + if (list_empty(&bdi->wb.b_dirty_time)) + continue; + bdi_wakeup_thread(bdi); + } + rcu_read_unlock(); + schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); +} + +static int __init start_dirtytime_writeback(void) +{ + schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); + return 0; +} +__initcall(start_dirtytime_writeback); + +int dirtytime_interval_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (ret == 0 && write) + mod_delayed_work(system_wq, &dirtytime_work, 0); + return ret; +} + static noinline void block_dump___mark_inode_dirty(struct inode *inode) { if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { @@ -1269,8 +1337,13 @@ void __mark_inode_dirty(struct inode *inode, int flags) } inode->dirtied_when = jiffies; - list_move(&inode->i_wb_list, dirtytime ? - &bdi->wb.b_dirty_time : &bdi->wb.b_dirty); + if (dirtytime) + inode->dirtied_time_when = jiffies; + if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES)) + list_move(&inode->i_wb_list, &bdi->wb.b_dirty); + else + list_move(&inode->i_wb_list, + &bdi->wb.b_dirty_time); spin_unlock(&bdi->wb.list_lock); trace_writeback_dirty_inode_enqueue(inode); diff --git a/fs/fs_pin.c b/fs/fs_pin.c index b06c987..611b540 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c @@ -9,8 +9,8 @@ static DEFINE_SPINLOCK(pin_lock); void pin_remove(struct fs_pin *pin) { spin_lock(&pin_lock); - hlist_del(&pin->m_list); - hlist_del(&pin->s_list); + hlist_del_init(&pin->m_list); + hlist_del_init(&pin->s_list); spin_unlock(&pin_lock); spin_lock_irq(&pin->wait.lock); pin->done = 1; diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 205e0d5..f863ac6 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -244,7 +244,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc) return 0; parent = fuse_control_sb->s_root; - inc_nlink(parent->d_inode); + inc_nlink(d_inode(parent)); sprintf(name, "%u", fc->dev); parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2, &simple_dir_inode_operations, @@ -283,11 +283,11 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc) for (i = fc->ctl_ndents - 1; i >= 0; i--) { struct dentry *dentry = fc->ctl_dentry[i]; - dentry->d_inode->i_private = NULL; + d_inode(dentry)->i_private = NULL; d_drop(dentry); dput(dentry); } - drop_nlink(fuse_control_sb->s_root->d_inode); + drop_nlink(d_inode(fuse_control_sb->s_root)); } static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent) diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index 28d0c7ab..e5bbf74 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -38,7 +38,6 @@ #include <linux/device.h> #include <linux/file.h> #include <linux/fs.h> -#include <linux/aio.h> #include <linux/kdev_t.h> #include <linux/kthread.h> #include <linux/list.h> @@ -48,6 +47,7 @@ #include <linux/slab.h> #include <linux/stat.h> #include <linux/module.h> +#include <linux/uio.h> #include "fuse_i.h" @@ -88,32 +88,23 @@ static struct list_head *cuse_conntbl_head(dev_t devt) * FUSE file. 
*/ -static ssize_t cuse_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) +static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to) { + struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp }; loff_t pos = 0; - struct iovec iov = { .iov_base = buf, .iov_len = count }; - struct fuse_io_priv io = { .async = 0, .file = file }; - struct iov_iter ii; - iov_iter_init(&ii, READ, &iov, 1, count); - return fuse_direct_io(&io, &ii, &pos, FUSE_DIO_CUSE); + return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE); } -static ssize_t cuse_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from) { + struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp }; loff_t pos = 0; - struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; - struct fuse_io_priv io = { .async = 0, .file = file }; - struct iov_iter ii; - iov_iter_init(&ii, WRITE, &iov, 1, count); - /* * No locking or generic_write_checks(), the server is * responsible for locking and sanity checks. */ - return fuse_direct_io(&io, &ii, &pos, + return fuse_direct_io(&io, from, &pos, FUSE_DIO_WRITE | FUSE_DIO_CUSE); } @@ -186,8 +177,8 @@ static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd, static const struct file_operations cuse_frontend_fops = { .owner = THIS_MODULE, - .read = cuse_read, - .write = cuse_write, + .read_iter = cuse_read_iter, + .write_iter = cuse_write_iter, .open = cuse_open, .release = cuse_release, .unlocked_ioctl = cuse_file_ioctl, diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 39706c5..c8b68ab 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -19,7 +19,6 @@ #include <linux/pipe_fs_i.h> #include <linux/swap.h> #include <linux/splice.h> -#include <linux/aio.h> MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); @@ -711,28 +710,26 @@ struct fuse_copy_state { struct fuse_conn *fc; int write; struct fuse_req *req; - const struct iovec *iov; + struct iov_iter *iter; struct pipe_buffer *pipebufs; struct pipe_buffer *currbuf; struct pipe_inode_info *pipe; unsigned long nr_segs; - unsigned long seglen; - unsigned long addr; struct page *pg; unsigned len; unsigned offset; unsigned move_pages:1; }; -static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc, +static void fuse_copy_init(struct fuse_copy_state *cs, + struct fuse_conn *fc, int write, - const struct iovec *iov, unsigned long nr_segs) + struct iov_iter *iter) { memset(cs, 0, sizeof(*cs)); cs->fc = fc; cs->write = write; - cs->iov = iov; - cs->nr_segs = nr_segs; + cs->iter = iter; } /* Unmap and put previous page of userspace buffer */ @@ -800,22 +797,16 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) cs->nr_segs++; } } else { - if (!cs->seglen) { - BUG_ON(!cs->nr_segs); - cs->seglen = cs->iov[0].iov_len; - cs->addr = (unsigned long) cs->iov[0].iov_base; - cs->iov++; - cs->nr_segs--; - } - err = get_user_pages_fast(cs->addr, 1, cs->write, &page); + size_t off; + err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off); if (err < 0) return err; - BUG_ON(err != 1); + BUG_ON(!err); + cs->len = err; + cs->offset = off; cs->pg = page; - cs->offset = cs->addr % PAGE_SIZE; - cs->len = min(PAGE_SIZE - cs->offset, cs->seglen); - cs->seglen -= cs->len; - cs->addr += cs->len; + cs->offset = off; + iov_iter_advance(cs->iter, err); } return lock_request(cs->fc, cs->req); @@ -1364,8 +1355,7 @@ static int fuse_dev_open(struct inode *inode, struct file *file) return 0; } 
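[Editorial aside, not part of the patch: the reworked fuse_copy_fill() above stops tracking iov/seglen/addr by hand and instead pulls one page at a time straight out of the iov_iter. A condensed sketch of that pattern; the helper name is invented and error handling is trimmed to the essentials:]

#include <linux/mm.h>
#include <linux/uio.h>

/* Map at most one page of the iterator and report where the data sits in it. */
static int fill_one_page(struct iov_iter *iter, struct page **pagep,
			 unsigned int *offset, unsigned int *len)
{
	size_t off;
	ssize_t got = iov_iter_get_pages(iter, pagep, PAGE_SIZE, 1, &off);

	if (got < 0)
		return got;
	*offset = off;
	*len = got;
	/* Consume the mapped bytes so the next call resumes after them. */
	iov_iter_advance(iter, got);
	return 0;
}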
-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) +static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to) { struct fuse_copy_state cs; struct file *file = iocb->ki_filp; @@ -1373,9 +1363,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, if (!fc) return -EPERM; - fuse_copy_init(&cs, fc, 1, iov, nr_segs); + if (!iter_is_iovec(to)) + return -EINVAL; - return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs)); + fuse_copy_init(&cs, fc, 1, to); + + return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to)); } static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, @@ -1395,7 +1388,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, if (!bufs) return -ENOMEM; - fuse_copy_init(&cs, fc, 1, NULL, 0); + fuse_copy_init(&cs, fc, 1, NULL); cs.pipebufs = bufs; cs.pipe = pipe; ret = fuse_dev_do_read(fc, in, &cs, len); @@ -1971,17 +1964,19 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc, return err; } -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) +static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from) { struct fuse_copy_state cs; struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); if (!fc) return -EPERM; - fuse_copy_init(&cs, fc, 0, iov, nr_segs); + if (!iter_is_iovec(from)) + return -EINVAL; + + fuse_copy_init(&cs, fc, 0, from); - return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs)); + return fuse_dev_do_write(fc, &cs, iov_iter_count(from)); } static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, @@ -2044,8 +2039,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, } pipe_unlock(pipe); - fuse_copy_init(&cs, fc, 0, NULL, nbuf); + fuse_copy_init(&cs, fc, 0, NULL); cs.pipebufs = bufs; + cs.nr_segs = nbuf; cs.pipe = pipe; if (flags & SPLICE_F_MOVE) @@ -2233,11 +2229,9 @@ const struct file_operations fuse_dev_operations = { .owner = THIS_MODULE, .open = fuse_dev_open, .llseek = no_llseek, - .read = do_sync_read, - .aio_read = fuse_dev_read, + .read_iter = fuse_dev_read, .splice_read = fuse_dev_splice_read, - .write = do_sync_write, - .aio_write = fuse_dev_write, + .write_iter = fuse_dev_write, .splice_write = fuse_dev_splice_write, .poll = fuse_dev_poll, .release = fuse_dev_release, diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 1545b71..0572bca 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -192,7 +192,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) struct fuse_inode *fi; int ret; - inode = ACCESS_ONCE(entry->d_inode); + inode = d_inode_rcu(entry); if (inode && is_bad_inode(inode)) goto invalid; else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || @@ -220,7 +220,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) attr_version = fuse_get_attr_version(fc); parent = dget_parent(entry); - fuse_lookup_init(fc, &args, get_node_id(parent->d_inode), + fuse_lookup_init(fc, &args, get_node_id(d_inode(parent)), &entry->d_name, &outarg); ret = fuse_simple_request(fc, &args); dput(parent); @@ -254,7 +254,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) return -ECHILD; } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) { parent = dget_parent(entry); - fuse_advise_use_readdirplus(parent->d_inode); + fuse_advise_use_readdirplus(d_inode(parent)); dput(parent); } } @@ -487,7 +487,7 @@ static int fuse_atomic_open(struct inode 
*dir, struct dentry *entry, entry = res; } - if (!(flags & O_CREAT) || entry->d_inode) + if (!(flags & O_CREAT) || d_really_is_positive(entry)) goto no_open; /* Only creates */ @@ -653,7 +653,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) args.in.args[0].value = entry->d_name.name; err = fuse_simple_request(fc, &args); if (!err) { - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); struct fuse_inode *fi = get_fuse_inode(inode); spin_lock(&fc->lock); @@ -689,7 +689,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) args.in.args[0].value = entry->d_name.name; err = fuse_simple_request(fc, &args); if (!err) { - clear_nlink(entry->d_inode); + clear_nlink(d_inode(entry)); fuse_invalidate_attr(dir); fuse_invalidate_entry_cache(entry); } else if (err == -EINTR) @@ -721,12 +721,12 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent, err = fuse_simple_request(fc, &args); if (!err) { /* ctime changes */ - fuse_invalidate_attr(oldent->d_inode); - fuse_update_ctime(oldent->d_inode); + fuse_invalidate_attr(d_inode(oldent)); + fuse_update_ctime(d_inode(oldent)); if (flags & RENAME_EXCHANGE) { - fuse_invalidate_attr(newent->d_inode); - fuse_update_ctime(newent->d_inode); + fuse_invalidate_attr(d_inode(newent)); + fuse_update_ctime(d_inode(newent)); } fuse_invalidate_attr(olddir); @@ -734,10 +734,10 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent, fuse_invalidate_attr(newdir); /* newent will end up negative */ - if (!(flags & RENAME_EXCHANGE) && newent->d_inode) { - fuse_invalidate_attr(newent->d_inode); + if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) { + fuse_invalidate_attr(d_inode(newent)); fuse_invalidate_entry_cache(newent); - fuse_update_ctime(newent->d_inode); + fuse_update_ctime(d_inode(newent)); } } else if (err == -EINTR) { /* If request was interrupted, DEITY only knows if the @@ -746,7 +746,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent, directory), then there can be inconsistency between the dcache and the real filesystem. Tough luck. 
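[Editorial aside, not part of the patch: most of the remaining churn in fuse/dir.c, and in the gfs2/hfs hunks further down, is the mechanical move from dentry->d_inode to the VFS accessors. Approximate definitions, reproduced from memory of include/linux/dcache.h in this era; treat them as a sketch rather than the authoritative source:]

static inline struct inode *d_inode(const struct dentry *dentry)
{
	return dentry->d_inode;
}

static inline bool d_really_is_positive(const struct dentry *dentry)
{
	return dentry->d_inode != NULL;	/* dentry currently points at an inode */
}

static inline bool d_really_is_negative(const struct dentry *dentry)
{
	return dentry->d_inode == NULL;	/* negative (inode-less) dentry */
}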
*/ fuse_invalidate_entry(oldent); - if (newent->d_inode) + if (d_really_is_positive(newent)) fuse_invalidate_entry(newent); } @@ -788,7 +788,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, { int err; struct fuse_link_in inarg; - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); FUSE_ARGS(args); @@ -961,9 +961,9 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, fuse_invalidate_attr(parent); fuse_invalidate_entry(entry); - if (child_nodeid != 0 && entry->d_inode) { - mutex_lock(&entry->d_inode->i_mutex); - if (get_node_id(entry->d_inode) != child_nodeid) { + if (child_nodeid != 0 && d_really_is_positive(entry)) { + mutex_lock(&d_inode(entry)->i_mutex); + if (get_node_id(d_inode(entry)) != child_nodeid) { err = -ENOENT; goto badentry; } @@ -977,13 +977,13 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, err = -ENOTEMPTY; goto badentry; } - entry->d_inode->i_flags |= S_DEAD; + d_inode(entry)->i_flags |= S_DEAD; } dont_mount(entry); - clear_nlink(entry->d_inode); + clear_nlink(d_inode(entry)); err = 0; badentry: - mutex_unlock(&entry->d_inode->i_mutex); + mutex_unlock(&d_inode(entry)->i_mutex); if (!err) d_delete(entry); } else { @@ -1169,7 +1169,7 @@ static int fuse_direntplus_link(struct file *file, struct qstr name = QSTR_INIT(dirent->name, dirent->namelen); struct dentry *dentry; struct dentry *alias; - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); struct fuse_conn *fc; struct inode *inode; @@ -1205,7 +1205,7 @@ static int fuse_direntplus_link(struct file *file, name.hash = full_name_hash(name.name, name.len); dentry = d_lookup(parent, &name); if (dentry) { - inode = dentry->d_inode; + inode = d_inode(dentry); if (!inode) { d_drop(dentry); } else if (get_node_id(inode) != o->nodeid || @@ -1367,7 +1367,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) static char *read_link(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct fuse_conn *fc = get_fuse_conn(inode); FUSE_ARGS(args); char *link; @@ -1712,7 +1712,7 @@ error: static int fuse_setattr(struct dentry *entry, struct iattr *attr) { - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); if (!fuse_allow_current_process(get_fuse_conn(inode))) return -EACCES; @@ -1726,7 +1726,7 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, struct kstat *stat) { - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); if (!fuse_allow_current_process(fc)) @@ -1738,7 +1738,7 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, static int fuse_setxattr(struct dentry *entry, const char *name, const void *value, size_t size, int flags) { - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); FUSE_ARGS(args); struct fuse_setxattr_in inarg; @@ -1774,7 +1774,7 @@ static int fuse_setxattr(struct dentry *entry, const char *name, static ssize_t fuse_getxattr(struct dentry *entry, const char *name, void *value, size_t size) { - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); FUSE_ARGS(args); struct fuse_getxattr_in inarg; @@ -1815,7 +1815,7 @@ static ssize_t 
fuse_getxattr(struct dentry *entry, const char *name, static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) { - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); FUSE_ARGS(args); struct fuse_getxattr_in inarg; @@ -1857,7 +1857,7 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) static int fuse_removexattr(struct dentry *entry, const char *name) { - struct inode *inode = entry->d_inode; + struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); FUSE_ARGS(args); int err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c01ec3b..5ef05b5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -15,8 +15,8 @@ #include <linux/module.h> #include <linux/compat.h> #include <linux/swap.h> -#include <linux/aio.h> #include <linux/falloc.h> +#include <linux/uio.h> static const struct file_operations fuse_direct_io_file_operations; @@ -528,6 +528,17 @@ static void fuse_release_user_pages(struct fuse_req *req, int write) } } +static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io) +{ + if (io->err) + return io->err; + + if (io->bytes >= 0 && io->write) + return -EIO; + + return io->bytes < 0 ? io->size : io->bytes; +} + /** * In case of short read, the caller sets 'pos' to the position of * actual end of fuse request in IO request. Otherwise, if bytes_requested @@ -546,6 +557,7 @@ static void fuse_release_user_pages(struct fuse_req *req, int write) */ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) { + bool is_sync = is_sync_kiocb(io->iocb); int left; spin_lock(&io->lock); @@ -555,30 +567,24 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) io->bytes = pos; left = --io->reqs; + if (!left && is_sync) + complete(io->done); spin_unlock(&io->lock); - if (!left) { - long res; + if (!left && !is_sync) { + ssize_t res = fuse_get_res_by_io(io); - if (io->err) - res = io->err; - else if (io->bytes >= 0 && io->write) - res = -EIO; - else { - res = io->bytes < 0 ? 
io->size : io->bytes; - - if (!is_sync_kiocb(io->iocb)) { - struct inode *inode = file_inode(io->iocb->ki_filp); - struct fuse_conn *fc = get_fuse_conn(inode); - struct fuse_inode *fi = get_fuse_inode(inode); + if (res >= 0) { + struct inode *inode = file_inode(io->iocb->ki_filp); + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_inode *fi = get_fuse_inode(inode); - spin_lock(&fc->lock); - fi->attr_version = ++fc->attr_version; - spin_unlock(&fc->lock); - } + spin_lock(&fc->lock); + fi->attr_version = ++fc->attr_version; + spin_unlock(&fc->lock); } - aio_complete(io->iocb, res, 0); + io->iocb->ki_complete(io->iocb, res, 0); kfree(io); } } @@ -1139,13 +1145,11 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; - size_t count = iov_iter_count(from); ssize_t written = 0; ssize_t written_buffered = 0; struct inode *inode = mapping->host; ssize_t err; loff_t endbyte = 0; - loff_t pos = iocb->ki_pos; if (get_fuse_conn(inode)->writeback_cache) { /* Update size (EOF optimization) and mode (SUID clearing) */ @@ -1161,14 +1165,10 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) /* We can write back this queue in page reclaim */ current->backing_dev_info = inode_to_bdi(inode); - err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); - if (err) + err = generic_write_checks(iocb, from); + if (err <= 0) goto out; - if (count == 0) - goto out; - - iov_iter_truncate(from, count); err = file_remove_suid(file); if (err) goto out; @@ -1177,7 +1177,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (err) goto out; - if (file->f_flags & O_DIRECT) { + if (iocb->ki_flags & IOCB_DIRECT) { + loff_t pos = iocb->ki_pos; written = generic_file_direct_write(iocb, from, pos); if (written < 0 || !iov_iter_count(from)) goto out; @@ -1203,9 +1204,9 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) written += written_buffered; iocb->ki_pos = pos + written_buffered; } else { - written = fuse_perform_write(file, mapping, from, pos); + written = fuse_perform_write(file, mapping, from, iocb->ki_pos); if (written >= 0) - iocb->ki_pos = pos + written; + iocb->ki_pos += written; } out: current->backing_dev_info = NULL; @@ -1395,55 +1396,30 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io, return res; } -static ssize_t fuse_direct_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct fuse_io_priv io = { .async = 0, .file = file }; - struct iovec iov = { .iov_base = buf, .iov_len = count }; - struct iov_iter ii; - iov_iter_init(&ii, READ, &iov, 1, count); - return __fuse_direct_read(&io, &ii, ppos); -} - -static ssize_t __fuse_direct_write(struct fuse_io_priv *io, - struct iov_iter *iter, - loff_t *ppos) +static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct file *file = io->file; - struct inode *inode = file_inode(file); - size_t count = iov_iter_count(iter); - ssize_t res; - - - res = generic_write_checks(file, ppos, &count, 0); - if (!res) { - iov_iter_truncate(iter, count); - res = fuse_direct_io(io, iter, ppos, FUSE_DIO_WRITE); - } - - fuse_invalidate_attr(inode); - - return res; + struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp }; + return __fuse_direct_read(&io, to, &iocb->ki_pos); } -static ssize_t fuse_direct_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) 
+static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; + struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); - ssize_t res; struct fuse_io_priv io = { .async = 0, .file = file }; - struct iov_iter ii; - iov_iter_init(&ii, WRITE, &iov, 1, count); + ssize_t res; if (is_bad_inode(inode)) return -EIO; /* Don't allow parallel writes to the same file */ mutex_lock(&inode->i_mutex); - res = __fuse_direct_write(&io, &ii, ppos); + res = generic_write_checks(iocb, from); if (res > 0) - fuse_write_update_size(inode, *ppos); + res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); + fuse_invalidate_attr(inode); + if (res > 0) + fuse_write_update_size(inode, iocb->ki_pos); mutex_unlock(&inode->i_mutex); return res; @@ -2798,9 +2774,9 @@ static inline loff_t fuse_round_up(loff_t off) } static ssize_t -fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { + DECLARE_COMPLETION_ONSTACK(wait); ssize_t ret = 0; struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; @@ -2815,15 +2791,15 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, inode = file->f_mapping->host; i_size = i_size_read(inode); - if ((rw == READ) && (offset > i_size)) + if ((iov_iter_rw(iter) == READ) && (offset > i_size)) return 0; /* optimization for short read */ - if (async_dio && rw != WRITE && offset + count > i_size) { + if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { if (offset >= i_size) return 0; - count = min_t(loff_t, count, fuse_round_up(i_size - offset)); - iov_iter_truncate(iter, count); + iov_iter_truncate(iter, fuse_round_up(i_size - offset)); + count = iov_iter_count(iter); } io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); @@ -2834,7 +2810,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, io->bytes = -1; io->size = 0; io->offset = offset; - io->write = (rw == WRITE); + io->write = (iov_iter_rw(iter) == WRITE); io->err = 0; io->file = file; /* @@ -2849,13 +2825,19 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, * to wait on real async I/O requests, so we must submit this request * synchronously. */ - if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) + if (!is_sync_kiocb(iocb) && (offset + count > i_size) && + iov_iter_rw(iter) == WRITE) io->async = false; - if (rw == WRITE) - ret = __fuse_direct_write(io, iter, &pos); - else + if (io->async && is_sync_kiocb(iocb)) + io->done = &wait; + + if (iov_iter_rw(iter) == WRITE) { + ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE); + fuse_invalidate_attr(inode); + } else { ret = __fuse_direct_read(io, iter, &pos); + } if (io->async) { fuse_aio_complete(io, ret < 0 ? 
ret : 0, -1); @@ -2864,12 +2846,13 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, if (!is_sync_kiocb(iocb)) return -EIOCBQUEUED; - ret = wait_on_sync_kiocb(iocb); - } else { - kfree(io); + wait_for_completion(&wait); + ret = fuse_get_res_by_io(io); } - if (rw == WRITE) { + kfree(io); + + if (iov_iter_rw(iter) == WRITE) { if (ret > 0) fuse_write_update_size(inode, pos); else if (ret < 0 && offset + count > i_size) @@ -2957,9 +2940,7 @@ out: static const struct file_operations fuse_file_operations = { .llseek = fuse_file_llseek, - .read = new_sync_read, .read_iter = fuse_file_read_iter, - .write = new_sync_write, .write_iter = fuse_file_write_iter, .mmap = fuse_file_mmap, .open = fuse_open, @@ -2977,8 +2958,8 @@ static const struct file_operations fuse_file_operations = { static const struct file_operations fuse_direct_io_file_operations = { .llseek = fuse_file_llseek, - .read = fuse_direct_read, - .write = fuse_direct_write, + .read_iter = fuse_direct_read_iter, + .write_iter = fuse_direct_write_iter, .mmap = fuse_direct_mmap, .open = fuse_open, .flush = fuse_flush, diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 1cdfb07..7354dc1 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -263,6 +263,7 @@ struct fuse_io_priv { int err; struct kiocb *iocb; struct file *file; + struct completion *done; }; /** diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index e8799c1..082ac1c 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -421,7 +421,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) memset(&outarg, 0, sizeof(outarg)); args.in.numargs = 0; args.in.h.opcode = FUSE_STATFS; - args.in.h.nodeid = get_node_id(dentry->d_inode); + args.in.h.nodeid = get_node_id(d_inode(dentry)); args.out.numargs = 1; args.out.args[0].size = sizeof(outarg); args.out.args[0].value = &outarg; @@ -740,7 +740,7 @@ static struct dentry *fuse_fh_to_parent(struct super_block *sb, static struct dentry *fuse_get_parent(struct dentry *child) { - struct inode *child_inode = child->d_inode; + struct inode *child_inode = d_inode(child); struct fuse_conn *fc = get_fuse_conn(child_inode); struct inode *inode; struct dentry *parent; diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 7b31430..1be3b06 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -110,11 +110,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS); if (error) goto out; - - if (acl) - set_cached_acl(inode, type, acl); - else - forget_cached_acl(inode, type); + set_cached_acl(inode, type, acl); out: kfree(data); return error; diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 4ad4f94..5551fea 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -20,7 +20,7 @@ #include <linux/swap.h> #include <linux/gfs2_ondisk.h> #include <linux/backing-dev.h> -#include <linux/aio.h> +#include <linux/uio.h> #include <trace/events/writeback.h> #include "gfs2.h" @@ -671,12 +671,12 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, if (alloc_required) { struct gfs2_alloc_parms ap = { .aflags = 0, }; - error = gfs2_quota_lock_check(ip); + requested = data_blocks + ind_blocks; + ap.target = requested; + error = gfs2_quota_lock_check(ip, &ap); if (error) goto out_unlock; - requested = data_blocks + ind_blocks; - ap.target = requested; error = gfs2_inplace_reserve(ip, &ap); if (error) goto out_qunlock; @@ -1016,13 +1016,12 @@ out: /** * gfs2_ok_for_dio - check that dio is valid on this file * @ip: The inode - * @rw: READ 
or WRITE * @offset: The offset at which we are reading or writing * * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) * 1 (to accept the i/o request) */ -static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) +static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset) { /* * Should we return an error here? I can't see that O_DIRECT for @@ -1039,8 +1038,8 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) -static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -1061,7 +1060,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, rv = gfs2_glock_nq(&gh); if (rv) return rv; - rv = gfs2_ok_for_dio(ip, rw, offset); + rv = gfs2_ok_for_dio(ip, offset); if (rv != 1) goto out; /* dio not valid, fall back to buffered i/o */ @@ -1091,13 +1090,12 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, rv = filemap_write_and_wait_range(mapping, lstart, end); if (rv) goto out; - if (rw == WRITE) + if (iov_iter_rw(iter) == WRITE) truncate_inode_pages_range(mapping, lstart, end); } - rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, - iter, offset, - gfs2_get_block_direct, NULL, NULL, 0); + rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, + offset, gfs2_get_block_direct, NULL, NULL, 0); out: gfs2_glock_dq(&gh); gfs2_holder_uninit(&gh); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index f0b945a..61296ec 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1224,7 +1224,7 @@ static int do_grow(struct inode *inode, u64 size) if (gfs2_is_stuffed(ip) && (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) { - error = gfs2_quota_lock_check(ip); + error = gfs2_quota_lock_check(ip, &ap); if (error) return error; diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c index 589f4ea9..30822b1 100644 --- a/fs/gfs2/dentry.c +++ b/fs/gfs2/dentry.c @@ -48,9 +48,9 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags) return -ECHILD; parent = dget_parent(dentry); - sdp = GFS2_SB(parent->d_inode); - dip = GFS2_I(parent->d_inode); - inode = dentry->d_inode; + sdp = GFS2_SB(d_inode(parent)); + dip = GFS2_I(d_inode(parent)); + inode = d_inode(dentry); if (inode) { if (is_bad_inode(inode)) @@ -68,7 +68,7 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags) goto fail; } - error = gfs2_dir_check(parent->d_inode, &dentry->d_name, ip); + error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip); switch (error) { case 0: if (!inode) @@ -113,10 +113,10 @@ static int gfs2_dentry_delete(const struct dentry *dentry) { struct gfs2_inode *ginode; - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) return 0; - ginode = GFS2_I(dentry->d_inode); + ginode = GFS2_I(d_inode(dentry)); if (!ginode->i_iopen_gh.gh_gl) return 0; diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index c41d255..5d15e94 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -49,7 +49,7 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len, fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); *len = GFS2_SMALL_FH_SIZE; - if (!parent || inode == sb->s_root->d_inode) + if (!parent || inode == d_inode(sb->s_root)) return *len; ip = GFS2_I(parent); @@ -88,8 +88,8 @@ static int get_name_filldir(struct dir_context *ctx, const char *name, static int gfs2_get_name(struct dentry 
*parent, char *name, struct dentry *child) { - struct inode *dir = parent->d_inode; - struct inode *inode = child->d_inode; + struct inode *dir = d_inode(parent); + struct inode *inode = d_inode(child); struct gfs2_inode *dip, *ip; struct get_name_filldir gnfd = { .ctx.actor = get_name_filldir, @@ -128,7 +128,7 @@ static int gfs2_get_name(struct dentry *parent, char *name, static struct dentry *gfs2_get_parent(struct dentry *child) { - return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1)); + return d_obtain_alias(gfs2_lookupi(d_inode(child), &gfs2_qdotdot, 1)); } static struct dentry *gfs2_get_dentry(struct super_block *sb, diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 3e32bb8..3189287 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -25,7 +25,6 @@ #include <asm/uaccess.h> #include <linux/dlm.h> #include <linux/dlm_plock.h> -#include <linux/aio.h> #include <linux/delay.h> #include "gfs2.h" @@ -429,11 +428,11 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto out_unlock; - ret = gfs2_quota_lock_check(ip); - if (ret) - goto out_unlock; gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); ap.target = data_blocks + ind_blocks; + ret = gfs2_quota_lock_check(ip, &ap); + if (ret) + goto out_unlock; ret = gfs2_inplace_reserve(ip, &ap); if (ret) goto out_quota_unlock; @@ -710,7 +709,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); - if (file->f_flags & O_APPEND) { + if (iocb->ki_flags & IOCB_APPEND) { struct gfs2_holder gh; ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); @@ -765,22 +764,30 @@ out: brelse(dibh); return error; } - -static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len, - unsigned int *data_blocks, unsigned int *ind_blocks) +/** + * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of + * blocks, determine how many bytes can be written. + * @ip: The inode in question. + * @len: Max cap of bytes. What we return in *len must be <= this. + * @data_blocks: Compute and return the number of data blocks needed + * @ind_blocks: Compute and return the number of indirect blocks needed + * @max_blocks: The total blocks available to work with. + * + * Returns: void, but @len, @data_blocks and @ind_blocks are filled in. 
+ */ +static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len, + unsigned int *data_blocks, unsigned int *ind_blocks, + unsigned int max_blocks) { + loff_t max = *len; const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - unsigned int max_blocks = ip->i_rgd->rd_free_clone; unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); for (tmp = max_data; tmp > sdp->sd_diptrs;) { tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); max_data -= tmp; } - /* This calculation isn't the exact reverse of gfs2_write_calc_reserve, - so it might end up with fewer data blocks */ - if (max_data <= *data_blocks) - return; + *data_blocks = max_data; *ind_blocks = max_blocks - max_data; *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; @@ -797,7 +804,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_alloc_parms ap = { .aflags = 0, }; unsigned int data_blocks = 0, ind_blocks = 0, rblocks; - loff_t bytes, max_bytes; + loff_t bytes, max_bytes, max_blks = UINT_MAX; int error; const loff_t pos = offset; const loff_t count = len; @@ -819,6 +826,9 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t gfs2_size_hint(file, offset, len); + gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks); + ap.min_target = data_blocks + ind_blocks; + while (len > 0) { if (len < bytes) bytes = len; @@ -827,27 +837,41 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t offset += bytes; continue; } - error = gfs2_quota_lock_check(ip); + + /* We need to determine how many bytes we can actually + * fallocate without exceeding quota or going over the + * end of the fs. We start off optimistically by assuming + * we can write max_bytes */ + max_bytes = (len > max_chunk_size) ? max_chunk_size : len; + + /* Since max_bytes is most likely a theoretical max, we + * calculate a more realistic 'bytes' to serve as a good + * starting point for the number of bytes we may be able + * to write */ + gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); + ap.target = data_blocks + ind_blocks; + + error = gfs2_quota_lock_check(ip, &ap); if (error) return error; -retry: - gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); + /* ap.allowed tells us how many blocks quota will allow + * us to write. Check if this reduces max_blks */ + if (ap.allowed && ap.allowed < max_blks) + max_blks = ap.allowed; - ap.target = data_blocks + ind_blocks; error = gfs2_inplace_reserve(ip, &ap); - if (error) { - if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) { - bytes >>= 1; - bytes &= bsize_mask; - if (bytes == 0) - bytes = sdp->sd_sb.sb_bsize; - goto retry; - } + if (error) goto out_qunlock; - } - max_bytes = bytes; - calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len, - &max_bytes, &data_blocks, &ind_blocks); + + /* check if the selected rgrp limits our max_blks further */ + if (ap.allowed && ap.allowed < max_blks) + max_blks = ap.allowed; + + /* Almost done. Calculate bytes that can be written using + * max_blks. 
We also recompute max_bytes, data_blocks and + * ind_blocks */ + calc_max_reserv(ip, &max_bytes, &data_blocks, + &ind_blocks, max_blks); rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks); @@ -931,6 +955,22 @@ out_uninit: return ret; } +static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe, + struct file *out, loff_t *ppos, + size_t len, unsigned int flags) +{ + int error; + struct gfs2_inode *ip = GFS2_I(out->f_mapping->host); + + error = gfs2_rs_alloc(ip); + if (error) + return (ssize_t)error; + + gfs2_size_hint(out, *ppos, len); + + return iter_file_splice_write(pipe, out, ppos, len, flags); +} + #ifdef CONFIG_GFS2_FS_LOCKING_DLM /** @@ -1065,9 +1105,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl) const struct file_operations gfs2_file_fops = { .llseek = gfs2_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = gfs2_file_write_iter, .unlocked_ioctl = gfs2_ioctl, .mmap = gfs2_mmap, @@ -1077,7 +1115,7 @@ const struct file_operations gfs2_file_fops = { .lock = gfs2_lock, .flock = gfs2_flock, .splice_read = generic_file_splice_read, - .splice_write = iter_file_splice_write, + .splice_write = gfs2_file_splice_write, .setlease = simple_nosetlease, .fallocate = gfs2_fallocate, }; @@ -1097,9 +1135,7 @@ const struct file_operations gfs2_dir_fops = { const struct file_operations gfs2_file_fops_nolock = { .llseek = gfs2_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = gfs2_file_write_iter, .unlocked_ioctl = gfs2_ioctl, .mmap = gfs2_mmap, @@ -1107,7 +1143,7 @@ const struct file_operations gfs2_file_fops_nolock = { .release = gfs2_release, .fsync = gfs2_fsync, .splice_read = generic_file_splice_read, - .splice_write = iter_file_splice_write, + .splice_write = gfs2_file_splice_write, .setlease = generic_setlease, .fallocate = gfs2_fallocate, }; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index f42dffb..0fa8062 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -2047,34 +2047,41 @@ static const struct file_operations gfs2_sbstats_fops = { int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) { - sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); - if (!sdp->debugfs_dir) - return -ENOMEM; - sdp->debugfs_dentry_glocks = debugfs_create_file("glocks", - S_IFREG | S_IRUGO, - sdp->debugfs_dir, sdp, - &gfs2_glocks_fops); - if (!sdp->debugfs_dentry_glocks) + struct dentry *dent; + + dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root); + if (IS_ERR_OR_NULL(dent)) + goto fail; + sdp->debugfs_dir = dent; + + dent = debugfs_create_file("glocks", + S_IFREG | S_IRUGO, + sdp->debugfs_dir, sdp, + &gfs2_glocks_fops); + if (IS_ERR_OR_NULL(dent)) goto fail; + sdp->debugfs_dentry_glocks = dent; - sdp->debugfs_dentry_glstats = debugfs_create_file("glstats", - S_IFREG | S_IRUGO, - sdp->debugfs_dir, sdp, - &gfs2_glstats_fops); - if (!sdp->debugfs_dentry_glstats) + dent = debugfs_create_file("glstats", + S_IFREG | S_IRUGO, + sdp->debugfs_dir, sdp, + &gfs2_glstats_fops); + if (IS_ERR_OR_NULL(dent)) goto fail; + sdp->debugfs_dentry_glstats = dent; - sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats", - S_IFREG | S_IRUGO, - sdp->debugfs_dir, sdp, - &gfs2_sbstats_fops); - if (!sdp->debugfs_dentry_sbstats) + dent = debugfs_create_file("sbstats", + S_IFREG | S_IRUGO, + sdp->debugfs_dir, sdp, + &gfs2_sbstats_fops); + if (IS_ERR_OR_NULL(dent)) goto fail; + 
sdp->debugfs_dentry_sbstats = dent; return 0; fail: gfs2_delete_debugfs_file(sdp); - return -ENOMEM; + return dent ? PTR_ERR(dent) : -ENOMEM; } void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) @@ -2100,6 +2107,8 @@ void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) int gfs2_register_debugfs(void) { gfs2_root = debugfs_create_dir("gfs2", NULL); + if (IS_ERR(gfs2_root)) + return PTR_ERR(gfs2_root); return gfs2_root ? 0 : -ENOMEM; } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 7a2dbbc..58b75ab 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -301,8 +301,10 @@ struct gfs2_blkreserv { * to the allocation code. */ struct gfs2_alloc_parms { - u32 target; + u64 target; + u32 min_target; u32 aflags; + u64 allowed; }; enum { diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 73c72253..1b3ca7a 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -295,7 +295,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) || (name->len == 2 && memcmp(name->name, "..", 2) == 0 && - dir == sb->s_root->d_inode)) { + dir == d_inode(sb->s_root))) { igrab(dir); return dir; } @@ -382,7 +382,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks) struct gfs2_alloc_parms ap = { .target = *dblocks, .aflags = flags, }; int error; - error = gfs2_quota_lock_check(ip); + error = gfs2_quota_lock_check(ip, &ap); if (error) goto out; @@ -525,7 +525,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name, int error; if (da->nr_blocks) { - error = gfs2_quota_lock_check(dip); + error = gfs2_quota_lock_check(dip, &ap); if (error) goto fail_quota_locks; @@ -687,7 +687,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, } gfs2_set_inode_flags(inode); - if ((GFS2_I(sdp->sd_root_dir->d_inode) == dip) || + if ((GFS2_I(d_inode(sdp->sd_root_dir)) == dip) || (dip->i_diskflags & GFS2_DIF_TOPDIR)) aflags |= GFS2_AF_ORLOV; @@ -888,7 +888,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder ghs[2]; struct buffer_head *dibh; @@ -953,7 +953,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, if (da.nr_blocks) { struct gfs2_alloc_parms ap = { .target = da.nr_blocks, }; - error = gfs2_quota_lock_check(dip); + error = gfs2_quota_lock_check(dip, &ap); if (error) goto out_gunlock; @@ -1055,7 +1055,7 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, static int gfs2_unlink_inode(struct gfs2_inode *dip, const struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); int error; @@ -1091,7 +1091,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder ghs[3]; struct gfs2_rgrpd *rgd; @@ -1241,7 +1241,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry, return PTR_ERR(d); if (d != NULL) dentry = d; - if (dentry->d_inode) { + if (d_really_is_positive(dentry)) { if (!(*opened & FILE_OPENED)) return finish_no_open(file, d); dput(d); @@ -1282,7 +1282,7 @@ static int 
gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to) error = -EINVAL; break; } - if (dir == sb->s_root->d_inode) { + if (dir == d_inode(sb->s_root)) { error = 0; break; } @@ -1321,7 +1321,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, { struct gfs2_inode *odip = GFS2_I(odir); struct gfs2_inode *ndip = GFS2_I(ndir); - struct gfs2_inode *ip = GFS2_I(odentry->d_inode); + struct gfs2_inode *ip = GFS2_I(d_inode(odentry)); struct gfs2_inode *nip = NULL; struct gfs2_sbd *sdp = GFS2_SB(odir); struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }; @@ -1332,8 +1332,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, unsigned int x; int error; - if (ndentry->d_inode) { - nip = GFS2_I(ndentry->d_inode); + if (d_really_is_positive(ndentry)) { + nip = GFS2_I(d_inode(ndentry)); if (ip == nip) return 0; } @@ -1457,7 +1457,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, /* Check out the dir to be renamed */ if (dir_rename) { - error = gfs2_permission(odentry->d_inode, MAY_WRITE); + error = gfs2_permission(d_inode(odentry), MAY_WRITE); if (error) goto out_gunlock; } @@ -1470,7 +1470,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, if (da.nr_blocks) { struct gfs2_alloc_parms ap = { .target = da.nr_blocks, }; - error = gfs2_quota_lock_check(ndip); + error = gfs2_quota_lock_check(ndip, &ap); if (error) goto out_gunlock; @@ -1550,7 +1550,7 @@ out: static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct gfs2_inode *ip = GFS2_I(dentry->d_inode); + struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); struct gfs2_holder i_gh; struct buffer_head *dibh; unsigned int size; @@ -1669,6 +1669,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) kuid_t ouid, nuid; kgid_t ogid, ngid; int error; + struct gfs2_alloc_parms ap; ouid = inode->i_uid; ogid = inode->i_gid; @@ -1696,9 +1697,11 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) if (error) goto out; + ap.target = gfs2_get_inode_blocks(&ip->i_inode); + if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) || !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) { - error = gfs2_quota_check(ip, nuid, ngid); + error = gfs2_quota_check(ip, nuid, ngid, &ap); if (error) goto out_gunlock_q; } @@ -1713,9 +1716,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) || !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) { - u64 blocks = gfs2_get_inode_blocks(&ip->i_inode); - gfs2_quota_change(ip, -blocks, ouid, ogid); - gfs2_quota_change(ip, blocks, nuid, ngid); + gfs2_quota_change(ip, -ap.target, ouid, ogid); + gfs2_quota_change(ip, ap.target, nuid, ngid); } out_end_trans: @@ -1740,7 +1742,7 @@ out: static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder i_gh; int error; @@ -1796,7 +1798,7 @@ out: static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int error; @@ -1819,7 +1821,7 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, static int gfs2_setxattr(struct dentry *dentry, const char *name, const void *data, size_t size, int flags) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); 
struct gfs2_holder gh; int ret; @@ -1839,7 +1841,7 @@ static int gfs2_setxattr(struct dentry *dentry, const char *name, static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name, void *data, size_t size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int ret; @@ -1860,7 +1862,7 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name, static int gfs2_removexattr(struct dentry *dentry, const char *name) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int ret; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index efc8e25..35b49f4 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -647,7 +647,7 @@ out_unlock: static int init_journal(struct gfs2_sbd *sdp, int undo) { - struct inode *master = sdp->sd_master_dir->d_inode; + struct inode *master = d_inode(sdp->sd_master_dir); struct gfs2_holder ji_gh; struct gfs2_inode *ip; int jindex = 1; @@ -782,7 +782,7 @@ static struct lock_class_key gfs2_quota_imutex_key; static int init_inodes(struct gfs2_sbd *sdp, int undo) { int error = 0; - struct inode *master = sdp->sd_master_dir->d_inode; + struct inode *master = d_inode(sdp->sd_master_dir); if (undo) goto fail_qinode; @@ -848,7 +848,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo) char buf[30]; int error = 0; struct gfs2_inode *ip; - struct inode *master = sdp->sd_master_dir->d_inode; + struct inode *master = d_inode(sdp->sd_master_dir); if (sdp->sd_args.ar_spectator) return 0; @@ -1357,7 +1357,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type, return ERR_PTR(error); } s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags, - path.dentry->d_inode->i_sb->s_bdev); + d_inode(path.dentry)->i_sb->s_bdev); path_put(&path); if (IS_ERR(s)) { pr_warn("gfs2 mount does not exist\n"); diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 3aa17d4..e3065cb 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -923,6 +923,9 @@ restart: if (error) return error; + if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags)) + force_refresh = FORCE; + qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { @@ -974,11 +977,8 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) sizeof(struct gfs2_quota_data *), sort_qd, NULL); for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { - int force = NO_FORCE; qd = ip->i_res->rs_qa_qd[x]; - if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags)) - force = FORCE; - error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]); + error = do_glock(qd, NO_FORCE, &ip->i_res->rs_qa_qd_ghs[x]); if (error) break; } @@ -1094,14 +1094,33 @@ static int print_message(struct gfs2_quota_data *qd, char *type) return 0; } -int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) +/** + * gfs2_quota_check - check if allocating new blocks will exceed quota + * @ip: The inode for which this check is being performed + * @uid: The uid to check against + * @gid: The gid to check against + * @ap: The allocation parameters. ap->target contains the requested + * blocks. ap->min_target, if set, contains the minimum blks + * requested. + * + * Returns: 0 on success. + * min_req = ap->min_target ? 
ap->min_target : ap->target; + * quota must allow atleast min_req blks for success and + * ap->allowed is set to the number of blocks allowed + * + * -EDQUOT otherwise, quota violation. ap->allowed is set to number + * of blocks available. + */ +int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, + struct gfs2_alloc_parms *ap) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_quota_data *qd; - s64 value; + s64 value, warn, limit; unsigned int x; int error = 0; + ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ if (!test_bit(GIF_QD_LOCKED, &ip->i_flags)) return 0; @@ -1115,30 +1134,37 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) qid_eq(qd->qd_id, make_kqid_gid(gid)))) continue; + warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn); + limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); value = (s64)be64_to_cpu(qd->qd_qb.qb_value); spin_lock(&qd_lock); value += qd->qd_change; spin_unlock(&qd_lock); - if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { - print_message(qd, "exceeded"); - quota_send_warning(qd->qd_id, - sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN); - - error = -EDQUOT; - break; - } else if (be64_to_cpu(qd->qd_qb.qb_warn) && - (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value && + if (limit > 0 && (limit - value) < ap->allowed) + ap->allowed = limit - value; + /* If we can't meet the target */ + if (limit && limit < (value + (s64)ap->target)) { + /* If no min_target specified or we don't meet + * min_target, return -EDQUOT */ + if (!ap->min_target || ap->min_target > ap->allowed) { + print_message(qd, "exceeded"); + quota_send_warning(qd->qd_id, + sdp->sd_vfs->s_dev, + QUOTA_NL_BHARDWARN); + error = -EDQUOT; + break; + } + } else if (warn && warn < value && time_after_eq(jiffies, qd->qd_last_warn + - gfs2_tune_get(sdp, - gt_quota_warn_period) * HZ)) { + gfs2_tune_get(sdp, gt_quota_warn_period) + * HZ)) { quota_send_warning(qd->qd_id, sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); error = print_message(qd, "warning"); qd->qd_last_warn = jiffies; } } - return error; } @@ -1468,32 +1494,34 @@ int gfs2_quotad(void *data) return 0; } -static int gfs2_quota_get_xstate(struct super_block *sb, - struct fs_quota_stat *fqs) +static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state) { struct gfs2_sbd *sdp = sb->s_fs_info; - memset(fqs, 0, sizeof(struct fs_quota_stat)); - fqs->qs_version = FS_QSTAT_VERSION; + memset(state, 0, sizeof(*state)); switch (sdp->sd_args.ar_quota) { case GFS2_QUOTA_ON: - fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD); + state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; + state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; /*FALLTHRU*/ case GFS2_QUOTA_ACCOUNT: - fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT); + state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED | + QCI_SYSFILE; + state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED | + QCI_SYSFILE; break; case GFS2_QUOTA_OFF: break; } - if (sdp->sd_quota_inode) { - fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr; - fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks; + state->s_state[USRQUOTA].ino = + GFS2_I(sdp->sd_quota_inode)->i_no_addr; + state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks; } - fqs->qs_uquota.qfs_nextents = 1; /* unsupported */ - fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */ - fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru); + state->s_state[USRQUOTA].nextents = 1; /* unsupported */ + state->s_state[GRPQUOTA] = 
state->s_state[USRQUOTA]; + state->s_incoredqs = list_lru_count(&gfs2_qd_lru); return 0; } @@ -1638,7 +1666,7 @@ out_put: const struct quotactl_ops gfs2_quotactl_ops = { .quota_sync = gfs2_quota_sync, - .get_xstate = gfs2_quota_get_xstate, + .get_state = gfs2_quota_get_state, .get_dqblk = gfs2_get_dqblk, .set_dqblk = gfs2_set_dqblk, }; diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 55d506e..ad04b3ac 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -24,7 +24,8 @@ extern void gfs2_quota_unhold(struct gfs2_inode *ip); extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid); extern void gfs2_quota_unlock(struct gfs2_inode *ip); -extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid); +extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, + struct gfs2_alloc_parms *ap); extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, kuid_t uid, kgid_t gid); @@ -37,7 +38,8 @@ extern int gfs2_quotad(void *data); extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp); -static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) +static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, + struct gfs2_alloc_parms *ap) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); int ret; @@ -48,7 +50,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) return ret; if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) return 0; - ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid); + ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap); if (ret) gfs2_quota_unlock(ip); return ret; diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 9150207..6af2396 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1946,10 +1946,18 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd) * @ip: the inode to reserve space for * @ap: the allocation parameters * - * Returns: errno + * We try our best to find an rgrp that has at least ap->target blocks + * available. After a couple of passes (loops == 2), the prospects of finding + * such an rgrp diminish. At this stage, we return the first rgrp that has + * atleast ap->min_target blocks available. Either way, we set ap->allowed to + * the number of blocks available in the chosen rgrp. 
+ * + * Returns: 0 on success, + * -ENOMEM if a suitable rgrp can't be found + * errno otherwise */ -int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap) +int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *begin = NULL; @@ -2012,7 +2020,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a /* Skip unuseable resource groups */ if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) || - (ap->target > rs->rs_rbm.rgd->rd_extfail_pt)) + (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt)) goto skip_rgrp; if (sdp->sd_args.ar_rgrplvb) @@ -2027,11 +2035,13 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a goto check_rgrp; /* If rgrp has enough free space, use it */ - if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) { + if (rs->rs_rbm.rgd->rd_free_clone >= ap->target || + (loops == 2 && ap->min_target && + rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) { ip->i_rgd = rs->rs_rbm.rgd; + ap->allowed = ip->i_rgd->rd_free_clone; return 0; } - check_rgrp: /* Check for unlinked inodes which can be reclaimed */ if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index b104f4a..68972ec 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h @@ -41,7 +41,8 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh); extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip); #define GFS2_AF_ORLOV 1 -extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap); +extern int gfs2_inplace_reserve(struct gfs2_inode *ip, + struct gfs2_alloc_parms *ap); extern void gfs2_inplace_release(struct gfs2_inode *ip); extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n, diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 1666382..859c6ed 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1171,7 +1171,7 @@ static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *s static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf) { - struct super_block *sb = dentry->d_inode->i_sb; + struct super_block *sb = d_inode(dentry)->i_sb; struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_statfs_change_host sc; int error; diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 0b81f78..4c096fa 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c @@ -420,7 +420,7 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh, ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size) { - struct gfs2_inode *ip = GFS2_I(dentry->d_inode); + struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); struct gfs2_ea_request er; struct gfs2_holder i_gh; int error; @@ -586,7 +586,7 @@ out: static int gfs2_xattr_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { - struct gfs2_inode *ip = GFS2_I(dentry->d_inode); + struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); struct gfs2_ea_location el; int error; @@ -732,7 +732,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, if (error) return error; - error = gfs2_quota_lock_check(ip); + error = gfs2_quota_lock_check(ip, &ap); if (error) return error; @@ -1230,7 +1230,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name, static int gfs2_xattr_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { - return __gfs2_xattr_set(dentry->d_inode, name, value, + 
return __gfs2_xattr_set(d_inode(dentry), name, value, size, flags, type); } diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c index e057ec5..8d931b1 100644 --- a/fs/hfs/attr.c +++ b/fs/hfs/attr.c @@ -16,7 +16,7 @@ int hfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hfs_find_data fd; hfs_cat_rec rec; struct hfs_cat_file *file; @@ -59,7 +59,7 @@ out: ssize_t hfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hfs_find_data fd; hfs_cat_rec rec; struct hfs_cat_file *file; @@ -105,7 +105,7 @@ out: ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode)) return -EOPNOTSUPP; diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index 1455668..70788e0 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -197,7 +197,7 @@ static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, inode = hfs_new_inode(dir, &dentry->d_name, mode); if (!inode) - return -ENOSPC; + return -ENOMEM; res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode); if (res) { @@ -226,7 +226,7 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode); if (!inode) - return -ENOSPC; + return -ENOMEM; res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode); if (res) { @@ -253,7 +253,7 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) */ static int hfs_remove(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int res; if (S_ISDIR(inode->i_mode) && inode->i_size != 2) @@ -285,18 +285,18 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, int res; /* Unlink destination if it already exists */ - if (new_dentry->d_inode) { + if (d_really_is_positive(new_dentry)) { res = hfs_remove(new_dir, new_dentry); if (res) return res; } - res = hfs_cat_move(old_dentry->d_inode->i_ino, + res = hfs_cat_move(d_inode(old_dentry)->i_ino, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name); if (!res) hfs_cat_build_key(old_dir->i_sb, - (btree_key *)&HFS_I(old_dentry->d_inode)->cat_key, + (btree_key *)&HFS_I(d_inode(old_dentry))->cat_key, new_dir->i_ino, &new_dentry->d_name); return res; } diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index d0929bc..b99ebdd 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -14,7 +14,7 @@ #include <linux/pagemap.h> #include <linux/mpage.h> #include <linux/sched.h> -#include <linux/aio.h> +#include <linux/uio.h> #include "hfs_fs.h" #include "btree.h" @@ -124,8 +124,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask) return res ? 
try_to_free_buffers(page) : 0; } -static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -133,13 +133,13 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. */ - if (unlikely((rw & WRITE) && ret < 0)) { + if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = offset + count; @@ -600,7 +600,7 @@ static int hfs_file_release(struct inode *inode, struct file *file) int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hfs_sb_info *hsb = HFS_SB(inode->i_sb); int error; @@ -674,9 +674,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end, static const struct file_operations hfs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .splice_read = generic_file_splice_read, diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c index 91b91fd..2875961 100644 --- a/fs/hfs/sysdep.c +++ b/fs/hfs/sysdep.c @@ -21,7 +21,7 @@ static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - inode = dentry->d_inode; + inode = d_inode(dentry); if(!inode) return 1; diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c index c1422d9..528e38b 100644 --- a/fs/hfsplus/bfind.c +++ b/fs/hfsplus/bfind.c @@ -118,9 +118,7 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd, int b, e; int res; - if (!rec_found) - BUG(); - + BUG_ON(!rec_found); b = 0; e = bnode->num_recs - 1; res = -ENOENT; diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index 6e560d5..754fdf8 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c @@ -131,13 +131,16 @@ skip: hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_bnode_dump(node); - if (new_node) { - /* update parent key if we inserted a key - * at the start of the first node - */ - if (!rec && new_node != node) - hfs_brec_update_parent(fd); + /* + * update parent key if we inserted a key + * at the start of the node and it is not the new node + */ + if (!rec && new_node != node) { + hfs_bnode_read_key(node, fd->search_key, data_off + size); + hfs_brec_update_parent(fd); + } + if (new_node) { hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); @@ -168,9 +171,6 @@ skip: goto again; } - if (!rec) - hfs_brec_update_parent(fd); - return 0; } @@ -370,6 +370,8 @@ again: if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd, hfs_find_rec_by_key); + if (fd->record < 0) + return -ENOENT; hfs_bnode_dump(parent); rec = fd->record; diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index 7892e6f..022974a 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c @@ -350,10 +350,11 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) &fd.search_key->cat.name.unicode, off + 2, len); fd.search_key->key_len = 
cpu_to_be16(6 + len); - } else + } else { err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str); if (unlikely(err)) goto out; + } err = hfs_brec_find(&fd, hfs_find_rec_by_key); if (err) diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index f0235c1..d0f39dc 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -81,7 +81,7 @@ again: HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)-> create_date || entry.file.create_date == - HFSPLUS_I(sb->s_root->d_inode)-> + HFSPLUS_I(d_inode(sb->s_root))-> create_date) && HFSPLUS_SB(sb)->hidden_dir) { struct qstr str; @@ -296,8 +296,8 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, struct dentry *dst_dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb); - struct inode *inode = src_dentry->d_inode; - struct inode *src_dir = src_dentry->d_parent->d_inode; + struct inode *inode = d_inode(src_dentry); + struct inode *src_dir = d_inode(src_dentry->d_parent); struct qstr str; char name[32]; u32 cnid, id; @@ -353,7 +353,7 @@ out: static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct qstr str; char name[32]; u32 cnid; @@ -410,7 +410,7 @@ out: static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int res; if (inode->i_size != 2) @@ -434,7 +434,7 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry, { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode; - int res = -ENOSPC; + int res = -ENOMEM; mutex_lock(&sbi->vh_mutex); inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO); @@ -476,7 +476,7 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry, { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode; - int res = -ENOSPC; + int res = -ENOMEM; mutex_lock(&sbi->vh_mutex); inode = hfsplus_new_inode(dir->i_sb, mode); @@ -529,7 +529,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, int res; /* Unlink destination if it already exists */ - if (new_dentry->d_inode) { + if (d_really_is_positive(new_dentry)) { if (d_is_dir(new_dentry)) res = hfsplus_rmdir(new_dir, new_dentry); else diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 0cf786f..6dd107d 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -14,7 +14,7 @@ #include <linux/pagemap.h> #include <linux/mpage.h> #include <linux/sched.h> -#include <linux/aio.h> +#include <linux/uio.h> #include "hfsplus_fs.h" #include "hfsplus_raw.h" @@ -122,8 +122,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) return res ? try_to_free_buffers(page) : 0; } -static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -131,14 +131,13 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, - hfsplus_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. 
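 * (Annotation, not part of the original comment: the rw argument has been * dropped from ->direct_IO(), so the write-vs-read decision below is now * derived from the iterator itself via iov_iter_rw(iter).)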
*/ - if (unlikely((rw & WRITE) && ret < 0)) { + if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = offset + count; @@ -244,7 +243,7 @@ static int hfsplus_file_release(struct inode *inode, struct file *file) static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; error = inode_change_ok(inode, attr); @@ -254,6 +253,12 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { inode_dio_wait(inode); + if (attr->ia_size > inode->i_size) { + error = generic_cont_expand_simple(inode, + attr->ia_size); + if (error) + return error; + } truncate_setsize(inode, attr->ia_size); hfsplus_file_truncate(inode); } @@ -341,9 +346,7 @@ static const struct inode_operations hfsplus_file_inode_operations = { static const struct file_operations hfsplus_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .splice_read = generic_file_splice_read, diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c index d3ff5cc..0624ce4 100644 --- a/fs/hfsplus/ioctl.c +++ b/fs/hfsplus/ioctl.c @@ -26,7 +26,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) { struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); struct hfsplus_vh *vh = sbi->s_vhdr; struct hfsplus_vh *bvh = sbi->s_backup_vhdr; @@ -76,7 +76,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags) { struct inode *inode = file_inode(file); struct hfsplus_inode_info *hip = HFSPLUS_I(inode); - unsigned int flags; + unsigned int flags, new_fl = 0; int err = 0; err = mnt_want_write_file(file); @@ -110,14 +110,12 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags) } if (flags & FS_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; - else - inode->i_flags &= ~S_IMMUTABLE; + new_fl |= S_IMMUTABLE; if (flags & FS_APPEND_FL) - inode->i_flags |= S_APPEND; - else - inode->i_flags &= ~S_APPEND; + new_fl |= S_APPEND; + + inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND); if (flags & FS_NODUMP_FL) hip->userflags |= HFSPLUS_FLG_NODUMP; diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index d98094a..416b1db 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c @@ -44,7 +44,7 @@ static int strcmp_xattr_acl(const char *name) return -1; } -static inline int is_known_namespace(const char *name) +static bool is_known_namespace(const char *name) { if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) && strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) && @@ -424,6 +424,28 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len) return len; } +int hfsplus_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + const char *prefix, size_t prefixlen) +{ + char *xattr_name; + int res; + + if (!strcmp(name, "")) + return -EINVAL; + + xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, + GFP_KERNEL); + if (!xattr_name) + return -ENOMEM; + strcpy(xattr_name, prefix); + strcpy(xattr_name + prefixlen, name); + res = __hfsplus_setxattr(d_inode(dentry), xattr_name, value, size, + flags); + 
kfree(xattr_name); + return res; +} + static ssize_t hfsplus_getxattr_finder_info(struct inode *inode, void *value, size_t size) { @@ -560,6 +582,30 @@ failed_getxattr_init: return res; } +ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, + void *value, size_t size, + const char *prefix, size_t prefixlen) +{ + int res; + char *xattr_name; + + if (!strcmp(name, "")) + return -EINVAL; + + xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, + GFP_KERNEL); + if (!xattr_name) + return -ENOMEM; + + strcpy(xattr_name, prefix); + strcpy(xattr_name + prefixlen, name); + + res = __hfsplus_getxattr(d_inode(dentry), xattr_name, value, size); + kfree(xattr_name); + return res; + +} + static inline int can_list(const char *xattr_name) { if (!xattr_name) @@ -574,7 +620,7 @@ static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry, char *buffer, size_t size) { ssize_t res = 0; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hfs_find_data fd; u16 entry_type; u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)]; @@ -642,7 +688,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size) { ssize_t err; ssize_t res = 0; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hfs_find_data fd; u16 key_len = 0; struct hfsplus_attr_key attr_key; @@ -806,9 +852,6 @@ end_removexattr: static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { - char *xattr_name; - int res; - if (!strcmp(name, "")) return -EINVAL; @@ -818,24 +861,19 @@ static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name, */ if (is_known_namespace(name)) return -EOPNOTSUPP; - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN - + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_MAC_OSX_PREFIX); - strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name); - res = hfsplus_getxattr(dentry, xattr_name, buffer, size); - kfree(xattr_name); - return res; + /* + * osx is the namespace we use to indicate an unprefixed + * attribute on the filesystem (like the ones that OS X + * creates), so we pass the name through unmodified (after + * ensuring it doesn't conflict with another namespace). + */ + return __hfsplus_getxattr(d_inode(dentry), name, buffer, size); } static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { - char *xattr_name; - int res; - if (!strcmp(name, "")) return -EINVAL; @@ -845,16 +883,14 @@ static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name, */ if (is_known_namespace(name)) return -EOPNOTSUPP; - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN - + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_MAC_OSX_PREFIX); - strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name); - res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags); - kfree(xattr_name); - return res; + /* + * osx is the namespace we use to indicate an unprefixed + * attribute on the filesystem (like the ones that OS X + * creates), so we pass the name through unmodified (after + * ensuring it doesn't conflict with another namespace). 
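+ * For example (attribute names chosen only for illustration): a + * setxattr("osx.foo") call reaches this handler with the prefix already + * stripped, so "foo" is stored as-is, while "osx.user.foo" is rejected + * above with -EOPNOTSUPP because "user.foo" belongs to the user namespace.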
+ */ + return __hfsplus_setxattr(d_inode(dentry), name, buffer, size, flags); } static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list, diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h index 288530c..f9b0955 100644 --- a/fs/hfsplus/xattr.h +++ b/fs/hfsplus/xattr.h @@ -21,22 +21,16 @@ extern const struct xattr_handler *hfsplus_xattr_handlers[]; int __hfsplus_setxattr(struct inode *inode, const char *name, const void *value, size_t size, int flags); -static inline int hfsplus_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags) -{ - return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags); -} +int hfsplus_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + const char *prefix, size_t prefixlen); ssize_t __hfsplus_getxattr(struct inode *inode, const char *name, - void *value, size_t size); - -static inline ssize_t hfsplus_getxattr(struct dentry *dentry, - const char *name, - void *value, - size_t size) -{ - return __hfsplus_getxattr(dentry->d_inode, name, value, size); -} + void *value, size_t size); + +ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, + void *value, size_t size, + const char *prefix, size_t prefixlen); ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size); diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c index 6ec5e10..aacff00 100644 --- a/fs/hfsplus/xattr_security.c +++ b/fs/hfsplus/xattr_security.c @@ -16,43 +16,17 @@ static int hfsplus_security_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { - char *xattr_name; - int res; - - if (!strcmp(name, "")) - return -EINVAL; - - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, - GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_SECURITY_PREFIX); - strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name); - - res = hfsplus_getxattr(dentry, xattr_name, buffer, size); - kfree(xattr_name); - return res; + return hfsplus_getxattr(dentry, name, buffer, size, + XATTR_SECURITY_PREFIX, + XATTR_SECURITY_PREFIX_LEN); } static int hfsplus_security_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { - char *xattr_name; - int res; - - if (!strcmp(name, "")) - return -EINVAL; - - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, - GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_SECURITY_PREFIX); - strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name); - - res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags); - kfree(xattr_name); - return res; + return hfsplus_setxattr(dentry, name, buffer, size, flags, + XATTR_SECURITY_PREFIX, + XATTR_SECURITY_PREFIX_LEN); } static size_t hfsplus_security_listxattr(struct dentry *dentry, char *list, diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c index 3c5f27e..bcf65089 100644 --- a/fs/hfsplus/xattr_trusted.c +++ b/fs/hfsplus/xattr_trusted.c @@ -14,43 +14,16 @@ static int hfsplus_trusted_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { - char *xattr_name; - int res; - - if (!strcmp(name, "")) - return -EINVAL; - - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, - GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_TRUSTED_PREFIX); - strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name); - - res = hfsplus_getxattr(dentry, 
xattr_name, buffer, size); - kfree(xattr_name); - return res; + return hfsplus_getxattr(dentry, name, buffer, size, + XATTR_TRUSTED_PREFIX, + XATTR_TRUSTED_PREFIX_LEN); } static int hfsplus_trusted_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { - char *xattr_name; - int res; - - if (!strcmp(name, "")) - return -EINVAL; - - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, - GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_TRUSTED_PREFIX); - strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name); - - res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags); - kfree(xattr_name); - return res; + return hfsplus_setxattr(dentry, name, buffer, size, flags, + XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); } static size_t hfsplus_trusted_listxattr(struct dentry *dentry, char *list, diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c index 2b625a5..5aa0e6d 100644 --- a/fs/hfsplus/xattr_user.c +++ b/fs/hfsplus/xattr_user.c @@ -14,43 +14,16 @@ static int hfsplus_user_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { - char *xattr_name; - int res; - if (!strcmp(name, "")) - return -EINVAL; - - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, - GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_USER_PREFIX); - strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name); - - res = hfsplus_getxattr(dentry, xattr_name, buffer, size); - kfree(xattr_name); - return res; + return hfsplus_getxattr(dentry, name, buffer, size, + XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); } static int hfsplus_user_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { - char *xattr_name; - int res; - - if (!strcmp(name, "")) - return -EINVAL; - - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1, - GFP_KERNEL); - if (!xattr_name) - return -ENOMEM; - strcpy(xattr_name, XATTR_USER_PREFIX); - strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name); - - res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags); - kfree(xattr_name); - return res; + return hfsplus_setxattr(dentry, name, buffer, size, flags, + XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); } static size_t hfsplus_user_listxattr(struct dentry *dentry, char *list, diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h index 4fcd40d..91e19f9 100644 --- a/fs/hostfs/hostfs.h +++ b/fs/hostfs/hostfs.h @@ -66,7 +66,8 @@ extern int stat_file(const char *path, struct hostfs_stat *p, int fd); extern int access_file(char *path, int r, int w, int x); extern int open_file(char *path, int r, int w, int append); extern void *open_dir(char *path, int *err_out); -extern char *read_dir(void *stream, unsigned long long *pos, +extern void seek_dir(void *stream, unsigned long long pos); +extern char *read_dir(void *stream, unsigned long long *pos_out, unsigned long long *ino_out, int *len_out, unsigned int *type_out); extern void close_file(void *stream); @@ -77,8 +78,7 @@ extern int write_file(int fd, unsigned long long *offset, const char *buf, int len); extern int lseek_file(int fd, long long offset, int whence); extern int fsync_file(int fd, int datasync); -extern int file_create(char *name, int ur, int uw, int ux, int gr, - int gw, int gx, int or, int ow, int ox); +extern int file_create(char *name, int mode); extern int set_attr(const char *file, struct hostfs_iattr *attrs, int fd); extern int make_symlink(const char 
*from, const char *to); extern int unlink_file(const char *file); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index fd62cae..ef26317 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -24,6 +24,7 @@ struct hostfs_inode_info { int fd; fmode_t mode; struct inode vfs_inode; + struct mutex open_mutex; }; static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) @@ -92,16 +93,22 @@ static char *__dentry_name(struct dentry *dentry, char *name) __putname(name); return NULL; } + + /* + * This function relies on the fact that dentry_path_raw() will place + * the path name at the end of the provided buffer. + */ + BUG_ON(p + strlen(p) + 1 != name + PATH_MAX); + strlcpy(name, root, PATH_MAX); if (len > p - name) { __putname(name); return NULL; } - if (p > name + len) { - char *s = name + len; - while ((*s++ = *p++) != '\0') - ; - } + + if (p > name + len) + strcpy(name + len, p); + return name; } @@ -135,21 +142,19 @@ static char *follow_link(char *link) int len, n; char *name, *resolved, *end; - len = 64; - while (1) { + name = __getname(); + if (!name) { n = -ENOMEM; - name = kmalloc(len, GFP_KERNEL); - if (name == NULL) - goto out; - - n = hostfs_do_readlink(link, name, len); - if (n < len) - break; - len *= 2; - kfree(name); + goto out_free; } + + n = hostfs_do_readlink(link, name, PATH_MAX); if (n < 0) goto out_free; + else if (n == PATH_MAX) { + n = -E2BIG; + goto out_free; + } if (*name == '/') return name; @@ -168,13 +173,12 @@ static char *follow_link(char *link) } sprintf(resolved, "%s%s", link, name); - kfree(name); + __putname(name); kfree(link); return resolved; out_free: - kfree(name); - out: + __putname(name); return ERR_PTR(n); } @@ -225,6 +229,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb) hi->fd = -1; hi->mode = 0; inode_init_once(&hi->vfs_inode); + mutex_init(&hi->open_mutex); return &hi->vfs_inode; } @@ -257,6 +262,9 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root) if (strlen(root_path) > offset) seq_printf(seq, ",%s", root_path + offset); + if (append) + seq_puts(seq, ",append"); + return 0; } @@ -284,6 +292,7 @@ static int hostfs_readdir(struct file *file, struct dir_context *ctx) if (dir == NULL) return -error; next = ctx->pos; + seek_dir(dir, next); while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) { if (!dir_emit(ctx, name, len, ino, type)) break; @@ -293,13 +302,12 @@ static int hostfs_readdir(struct file *file, struct dir_context *ctx) return 0; } -static int hostfs_file_open(struct inode *ino, struct file *file) +static int hostfs_open(struct inode *ino, struct file *file) { - static DEFINE_MUTEX(open_mutex); char *name; - fmode_t mode = 0; + fmode_t mode; int err; - int r = 0, w = 0, fd; + int r, w, fd; mode = file->f_mode & (FMODE_READ | FMODE_WRITE); if ((mode & HOSTFS_I(ino)->mode) == mode) @@ -308,12 +316,12 @@ static int hostfs_file_open(struct inode *ino, struct file *file) mode |= HOSTFS_I(ino)->mode; retry: + r = w = 0; + if (mode & FMODE_READ) r = 1; if (mode & FMODE_WRITE) - w = 1; - if (w) - r = 1; + r = w = 1; name = dentry_name(file->f_path.dentry); if (name == NULL) @@ -324,15 +332,16 @@ retry: if (fd < 0) return fd; - mutex_lock(&open_mutex); + mutex_lock(&HOSTFS_I(ino)->open_mutex); /* somebody else had handled it first? 
*/ if ((mode & HOSTFS_I(ino)->mode) == mode) { - mutex_unlock(&open_mutex); + mutex_unlock(&HOSTFS_I(ino)->open_mutex); + close_file(&fd); return 0; } if ((mode | HOSTFS_I(ino)->mode) != mode) { mode |= HOSTFS_I(ino)->mode; - mutex_unlock(&open_mutex); + mutex_unlock(&HOSTFS_I(ino)->open_mutex); close_file(&fd); goto retry; } @@ -342,12 +351,12 @@ retry: err = replace_file(fd, HOSTFS_I(ino)->fd); close_file(&fd); if (err < 0) { - mutex_unlock(&open_mutex); + mutex_unlock(&HOSTFS_I(ino)->open_mutex); return err; } } HOSTFS_I(ino)->mode = mode; - mutex_unlock(&open_mutex); + mutex_unlock(&HOSTFS_I(ino)->open_mutex); return 0; } @@ -378,13 +387,11 @@ static int hostfs_fsync(struct file *file, loff_t start, loff_t end, static const struct file_operations hostfs_file_fops = { .llseek = generic_file_llseek, - .read = new_sync_read, .splice_read = generic_file_splice_read, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .write = new_sync_write, .mmap = generic_file_mmap, - .open = hostfs_file_open, + .open = hostfs_open, .release = hostfs_file_release, .fsync = hostfs_fsync, }; @@ -393,6 +400,8 @@ static const struct file_operations hostfs_dir_fops = { .llseek = generic_file_llseek, .iterate = hostfs_readdir, .read = generic_read_dir, + .open = hostfs_open, + .fsync = hostfs_fsync, }; static int hostfs_writepage(struct page *page, struct writeback_control *wbc) @@ -400,7 +409,7 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc) struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; char *buffer; - unsigned long long base; + loff_t base = page_offset(page); int count = PAGE_CACHE_SIZE; int end_index = inode->i_size >> PAGE_CACHE_SHIFT; int err; @@ -409,7 +418,6 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc) count = inode->i_size & (PAGE_CACHE_SIZE-1); buffer = kmap(page); - base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT; err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count); if (err != count) { @@ -434,26 +442,29 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc) static int hostfs_readpage(struct file *file, struct page *page) { char *buffer; - long long start; - int err = 0; + loff_t start = page_offset(page); + int bytes_read, ret = 0; - start = (long long) page->index << PAGE_CACHE_SHIFT; buffer = kmap(page); - err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, + bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, PAGE_CACHE_SIZE); - if (err < 0) + if (bytes_read < 0) { + ClearPageUptodate(page); + SetPageError(page); + ret = bytes_read; goto out; + } - memset(&buffer[err], 0, PAGE_CACHE_SIZE - err); + memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); - flush_dcache_page(page); + ClearPageError(page); SetPageUptodate(page); - if (PageError(page)) ClearPageError(page); - err = 0; + out: + flush_dcache_page(page); kunmap(page); unlock_page(page); - return err; + return ret; } static int hostfs_write_begin(struct file *file, struct address_space *mapping, @@ -530,11 +541,13 @@ static int read_name(struct inode *ino, char *name) init_special_inode(ino, st.mode & S_IFMT, rdev); ino->i_op = &hostfs_iops; break; - - default: + case S_IFREG: ino->i_op = &hostfs_iops; ino->i_fop = &hostfs_file_fops; ino->i_mapping->a_ops = &hostfs_aops; + break; + default: + return -EIO; } ino->i_ino = st.ino; @@ -568,10 +581,7 @@ static int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, if (name 
== NULL) goto out_put; - fd = file_create(name, - mode & S_IRUSR, mode & S_IWUSR, mode & S_IXUSR, - mode & S_IRGRP, mode & S_IWGRP, mode & S_IXGRP, - mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH); + fd = file_create(name, mode & S_IFMT); if (fd < 0) error = fd; else @@ -797,7 +807,7 @@ static int hostfs_permission(struct inode *ino, int desired) static int hostfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hostfs_iattr attrs; char *name; int err; diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c index 9765dab..9c1e0f0 100644 --- a/fs/hostfs/hostfs_user.c +++ b/fs/hostfs/hostfs_user.c @@ -97,21 +97,27 @@ void *open_dir(char *path, int *err_out) return dir; } -char *read_dir(void *stream, unsigned long long *pos, +void seek_dir(void *stream, unsigned long long pos) +{ + DIR *dir = stream; + + seekdir(dir, pos); +} + +char *read_dir(void *stream, unsigned long long *pos_out, unsigned long long *ino_out, int *len_out, unsigned int *type_out) { DIR *dir = stream; struct dirent *ent; - seekdir(dir, *pos); ent = readdir(dir); if (ent == NULL) return NULL; *len_out = strlen(ent->d_name); *ino_out = ent->d_ino; *type_out = ent->d_type; - *pos = telldir(dir); + *pos_out = ent->d_off; return ent->d_name; } @@ -175,21 +181,10 @@ void close_dir(void *stream) closedir(stream); } -int file_create(char *name, int ur, int uw, int ux, int gr, - int gw, int gx, int or, int ow, int ox) +int file_create(char *name, int mode) { - int mode, fd; - - mode = 0; - mode |= ur ? S_IRUSR : 0; - mode |= uw ? S_IWUSR : 0; - mode |= ux ? S_IXUSR : 0; - mode |= gr ? S_IRGRP : 0; - mode |= gw ? S_IWGRP : 0; - mode |= gx ? S_IXGRP : 0; - mode |= or ? S_IROTH : 0; - mode |= ow ? S_IWOTH : 0; - mode |= ox ? 
S_IXOTH : 0; + int fd; + fd = open64(name, O_CREAT | O_RDWR, mode); if (fd < 0) return -errno; diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 7f54e5f..6d8cfe9 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -197,9 +197,7 @@ const struct address_space_operations hpfs_aops = { const struct file_operations hpfs_file_ops = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .release = hpfs_file_release, diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index 7ce4b74..933c737 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -257,7 +257,7 @@ void hpfs_write_inode_nolock(struct inode *i) int hpfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error = -EINVAL; hpfs_lock(inode->i_sb); diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index bdbc2c3..a0872f2 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -359,7 +359,7 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); dnode_secno dno; int r; int rep = 0; @@ -433,7 +433,7 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); dnode_secno dno; int n_items = 0; int err; @@ -522,8 +522,8 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, unsigned old_len = old_dentry->d_name.len; const unsigned char *new_name = new_dentry->d_name.name; unsigned new_len = new_dentry->d_name.len; - struct inode *i = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *i = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct quad_buffer_head qbh, qbh1; struct hpfs_dirent *dep, *nde; struct hpfs_dirent de; diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 043ac9d..fa2bd53 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -153,9 +153,9 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, return ERR_PTR(-ENOENT); parent = HPPFS_I(ino)->proc_dentry; - mutex_lock(&parent->d_inode->i_mutex); + mutex_lock(&d_inode(parent)->i_mutex); proc_dentry = lookup_one_len(name->name, parent, name->len); - mutex_unlock(&parent->d_inode->i_mutex); + mutex_unlock(&d_inode(parent)->i_mutex); if (IS_ERR(proc_dentry)) return proc_dentry; @@ -637,25 +637,25 @@ static const struct super_operations hppfs_sbops = { static int hppfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { - struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; - return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer, + struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry; + return d_inode(proc_dentry)->i_op->readlink(proc_dentry, buffer, buflen); } static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; + struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry; - return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd); + return d_inode(proc_dentry)->i_op->follow_link(proc_dentry, nd); } static void hppfs_put_link(struct dentry *dentry, struct 
nameidata *nd, void *cookie) { - struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; + struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry; - if (proc_dentry->d_inode->i_op->put_link) - proc_dentry->d_inode->i_op->put_link(proc_dentry, nd, cookie); + if (d_inode(proc_dentry)->i_op->put_link) + d_inode(proc_dentry)->i_op->put_link(proc_dentry, nd, cookie); } static const struct inode_operations hppfs_dir_iops = { @@ -670,7 +670,7 @@ static const struct inode_operations hppfs_link_iops = { static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) { - struct inode *proc_ino = dentry->d_inode; + struct inode *proc_ino = d_inode(dentry); struct inode *inode = new_inode(sb); if (!inode) { diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index c274aca..87724c1 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -34,6 +34,7 @@ #include <linux/security.h> #include <linux/magic.h> #include <linux/migrate.h> +#include <linux/uio.h> #include <asm/uaccess.h> @@ -47,9 +48,10 @@ struct hugetlbfs_config { kuid_t uid; kgid_t gid; umode_t mode; - long nr_blocks; + long max_hpages; long nr_inodes; struct hstate *hstate; + long min_hpages; }; struct hugetlbfs_inode_info { @@ -67,7 +69,7 @@ int sysctl_hugetlb_shm_group; enum { Opt_size, Opt_nr_inodes, Opt_mode, Opt_uid, Opt_gid, - Opt_pagesize, + Opt_pagesize, Opt_min_size, Opt_err, }; @@ -78,6 +80,7 @@ static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_pagesize, "pagesize=%s"}, + {Opt_min_size, "min_size=%s"}, {Opt_err, NULL}, }; @@ -179,42 +182,33 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } #endif -static int +static size_t hugetlbfs_read_actor(struct page *page, unsigned long offset, - char __user *buf, unsigned long count, - unsigned long size) + struct iov_iter *to, unsigned long size) { - char *kaddr; - unsigned long left, copied = 0; + size_t copied = 0; int i, chunksize; - if (size > count) - size = count; - /* Find which 4k chunk and offset with in that chunk */ i = offset >> PAGE_CACHE_SHIFT; offset = offset & ~PAGE_CACHE_MASK; while (size) { + size_t n; chunksize = PAGE_CACHE_SIZE; if (offset) chunksize -= offset; if (chunksize > size) chunksize = size; - kaddr = kmap(&page[i]); - left = __copy_to_user(buf, kaddr + offset, chunksize); - kunmap(&page[i]); - if (left) { - copied += (chunksize - left); - break; - } + n = copy_page_to_iter(&page[i], offset, chunksize, to); + copied += n; + if (n != chunksize) + return copied; offset = 0; size -= chunksize; - buf += chunksize; - copied += chunksize; i++; } - return copied ? copied : -EFAULT; + return copied; } /* @@ -222,39 +216,34 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset, * data. Its *very* similar to do_generic_mapping_read(), we can't use that * since it has PAGE_CACHE_SIZE assumptions. 
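 * (Annotation, not part of the original comment: in the read_iter() form * below the copying goes through copy_page_to_iter() and iov_iter_zero() on * the caller's iov_iter, so the kmap()/__copy_to_user() bookkeeping in the * old actor is no longer needed.)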
*/ -static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, - size_t len, loff_t *ppos) +static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct hstate *h = hstate_file(filp); - struct address_space *mapping = filp->f_mapping; + struct file *file = iocb->ki_filp; + struct hstate *h = hstate_file(file); + struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; - unsigned long index = *ppos >> huge_page_shift(h); - unsigned long offset = *ppos & ~huge_page_mask(h); + unsigned long index = iocb->ki_pos >> huge_page_shift(h); + unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); unsigned long end_index; loff_t isize; ssize_t retval = 0; - /* validate length */ - if (len == 0) - goto out; - - for (;;) { + while (iov_iter_count(to)) { struct page *page; - unsigned long nr, ret; - int ra; + size_t nr, copied; /* nr is the maximum number of bytes to copy from this page */ nr = huge_page_size(h); isize = i_size_read(inode); if (!isize) - goto out; + break; end_index = (isize - 1) >> huge_page_shift(h); - if (index >= end_index) { - if (index > end_index) - goto out; + if (index > end_index) + break; + if (index == end_index) { nr = ((isize - 1) & ~huge_page_mask(h)) + 1; if (nr <= offset) - goto out; + break; } nr = nr - offset; @@ -265,39 +254,27 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, * We have a HOLE, zero out the user-buffer for the * length of the hole or request. */ - ret = len < nr ? len : nr; - if (clear_user(buf, ret)) - ra = -EFAULT; - else - ra = 0; + copied = iov_iter_zero(nr, to); } else { unlock_page(page); /* * We have the page, copy it to user space buffer. */ - ra = hugetlbfs_read_actor(page, offset, buf, len, nr); - ret = ra; + copied = hugetlbfs_read_actor(page, offset, to, nr); page_cache_release(page); } - if (ra < 0) { - if (retval == 0) - retval = ra; - goto out; + offset += copied; + retval += copied; + if (copied != nr && iov_iter_count(to)) { + if (!retval) + retval = -EFAULT; + break; } - - offset += ret; - retval += ret; - len -= ret; index += offset >> huge_page_shift(h); offset &= ~huge_page_mask(h); - - /* short read or no more work */ - if ((ret != nr) || (len == 0)) - break; } -out: - *ppos = ((loff_t)index << huge_page_shift(h)) + offset; + iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; return retval; } @@ -319,7 +296,7 @@ static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, static void truncate_huge_page(struct page *page) { - cancel_dirty_page(page, /* No IO accounting for huge pages? 
*/0); + ClearPageDirty(page); ClearPageUptodate(page); delete_from_page_cache(page); } @@ -416,7 +393,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct hstate *h = hstate_inode(inode); int error; unsigned int ia_valid = attr->ia_valid; @@ -610,7 +587,7 @@ static int hugetlbfs_migrate_page(struct address_space *mapping, static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); - struct hstate *h = hstate_inode(dentry->d_inode); + struct hstate *h = hstate_inode(d_inode(dentry)); buf->f_type = HUGETLBFS_MAGIC; buf->f_bsize = huge_page_size(h); @@ -721,7 +698,7 @@ static void init_once(void *foo) } const struct file_operations hugetlbfs_file_operations = { - .read = hugetlbfs_read, + .read_iter = hugetlbfs_read_iter, .mmap = hugetlbfs_file_mmap, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, @@ -754,14 +731,38 @@ static const struct super_operations hugetlbfs_ops = { .show_options = generic_show_options, }; +enum { NO_SIZE, SIZE_STD, SIZE_PERCENT }; + +/* + * Convert size option passed from command line to number of huge pages + * in the pool specified by hstate. Size option could be in bytes + * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). + */ +static long long +hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, + int val_type) +{ + if (val_type == NO_SIZE) + return -1; + + if (val_type == SIZE_PERCENT) { + size_opt <<= huge_page_shift(h); + size_opt *= h->max_huge_pages; + do_div(size_opt, 100); + } + + size_opt >>= huge_page_shift(h); + return size_opt; +} + static int hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) { char *p, *rest; substring_t args[MAX_OPT_ARGS]; int option; - unsigned long long size = 0; - enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE; + unsigned long long max_size_opt = 0, min_size_opt = 0; + int max_val_type = NO_SIZE, min_val_type = NO_SIZE; if (!options) return 0; @@ -799,10 +800,10 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) /* memparse() will accept a K/M/G without a digit */ if (!isdigit(*args[0].from)) goto bad_val; - size = memparse(args[0].from, &rest); - setsize = SIZE_STD; + max_size_opt = memparse(args[0].from, &rest); + max_val_type = SIZE_STD; if (*rest == '%') - setsize = SIZE_PERCENT; + max_val_type = SIZE_PERCENT; break; } @@ -825,6 +826,17 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) break; } + case Opt_min_size: { + /* memparse() will accept a K/M/G without a digit */ + if (!isdigit(*args[0].from)) + goto bad_val; + min_size_opt = memparse(args[0].from, &rest); + min_val_type = SIZE_STD; + if (*rest == '%') + min_val_type = SIZE_PERCENT; + break; + } + default: pr_err("Bad mount option: \"%s\"\n", p); return -EINVAL; @@ -832,15 +844,22 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) } } - /* Do size after hstate is set up */ - if (setsize > NO_SIZE) { - struct hstate *h = pconfig->hstate; - if (setsize == SIZE_PERCENT) { - size <<= huge_page_shift(h); - size *= h->max_huge_pages; - do_div(size, 100); - } - pconfig->nr_blocks = (size >> huge_page_shift(h)); + /* + * Use huge page pool size (in hstate) to convert the size + * options to number of huge pages. If NO_SIZE, -1 is returned. 
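+ * For instance (numbers chosen only for illustration): with 2 MB huge pages + * (huge_page_shift(h) == 21) and a pool of 1024 pages, "size=50%" works out + * to 1024 * 50 / 100 = 512 huge pages, and "size=1G" to (1 << 30) >> 21 = + * 512 huge pages.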
+ */ + pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate, + max_size_opt, max_val_type); + pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate, + min_size_opt, min_val_type); + + /* + * If max_size was specified, then min_size must be smaller + */ + if (max_val_type > NO_SIZE && + pconfig->min_hpages > pconfig->max_hpages) { + pr_err("minimum size can not be greater than maximum size\n"); + return -EINVAL; } return 0; @@ -859,12 +878,13 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) save_mount_options(sb, data); - config.nr_blocks = -1; /* No limit on size by default */ + config.max_hpages = -1; /* No limit on size by default */ config.nr_inodes = -1; /* No limit on number of inodes by default */ config.uid = current_fsuid(); config.gid = current_fsgid(); config.mode = 0755; config.hstate = &default_hstate; + config.min_hpages = -1; /* No default minimum size */ ret = hugetlbfs_parse_options(data, &config); if (ret) return ret; @@ -878,8 +898,15 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) sbinfo->max_inodes = config.nr_inodes; sbinfo->free_inodes = config.nr_inodes; sbinfo->spool = NULL; - if (config.nr_blocks != -1) { - sbinfo->spool = hugepage_new_subpool(config.nr_blocks); + /* + * Allocate and initialize subpool if maximum or minimum size is + * specified. Any needed reservations (for minimim size) are taken + * taken when the subpool is created. + */ + if (config.max_hpages != -1 || config.min_hpages != -1) { + sbinfo->spool = hugepage_new_subpool(config.hstate, + config.max_hpages, + config.min_hpages); if (!sbinfo->spool) goto out_free; } @@ -1587,7 +1587,7 @@ static int update_time(struct inode *inode, struct timespec *time, int flags) void touch_atime(const struct path *path) { struct vfsmount *mnt = path->mnt; - struct inode *inode = path->dentry->d_inode; + struct inode *inode = d_inode(path->dentry); struct timespec now; if (inode->i_flags & S_NOATIME) @@ -1639,7 +1639,7 @@ EXPORT_SYMBOL(touch_atime); */ int should_remove_suid(struct dentry *dentry) { - umode_t mode = dentry->d_inode->i_mode; + umode_t mode = d_inode(dentry)->i_mode; int kill = 0; /* suid always must be killed */ @@ -1675,7 +1675,7 @@ static int __remove_suid(struct dentry *dentry, int kill) int file_remove_suid(struct file *file) { struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int killsuid; int killpriv; int error = 0; @@ -1946,20 +1946,6 @@ void inode_dio_wait(struct inode *inode) EXPORT_SYMBOL(inode_dio_wait); /* - * inode_dio_done - signal finish of a direct I/O requests - * @inode: inode the direct I/O happens on - * - * This is called once we've finished processing a direct I/O request, - * and is used to wake up callers waiting for direct I/O to be quiesced. 
- */ -void inode_dio_done(struct inode *inode) -{ - if (atomic_dec_and_test(&inode->i_dio_count)) - wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); -} -EXPORT_SYMBOL(inode_dio_done); - -/* * inode_set_flags - atomically set some inode flags * * Note: the caller should be holding i_mutex, or else be sure that diff --git a/fs/isofs/export.c b/fs/isofs/export.c index 12088d8..0c5f721 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -44,7 +44,7 @@ static struct dentry *isofs_export_get_parent(struct dentry *child) { unsigned long parent_block = 0; unsigned long parent_offset = 0; - struct inode *child_inode = child->d_inode; + struct inode *child_inode = d_inode(child); struct iso_inode_info *e_child_inode = ISOFS_I(child_inode); struct iso_directory_record *de = NULL; struct buffer_head * bh = NULL; diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index f21b6fb..1ba5c97 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -224,14 +224,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) { struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); - struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode); + struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(d_inode(dentry)); int ret; uint32_t now = get_seconds(); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, dead_f, now); if (dead_f->inocache) - set_nlink(dentry->d_inode, dead_f->inocache->pino_nlink); + set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink); if (!ret) dir_i->i_mtime = dir_i->i_ctime = ITIME(now); return ret; @@ -241,8 +241,8 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry) { - struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_inode->i_sb); - struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(d_inode(old_dentry)->i_sb); + struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry)); struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); int ret; uint8_t type; @@ -256,7 +256,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de return -EPERM; /* XXX: This is ugly */ - type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; + type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12; if (!type) type = DT_REG; now = get_seconds(); @@ -264,11 +264,11 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de if (!ret) { mutex_lock(&f->sem); - set_nlink(old_dentry->d_inode, ++f->inocache->pino_nlink); + set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink); mutex_unlock(&f->sem); - d_instantiate(dentry, old_dentry->d_inode); + d_instantiate(dentry, d_inode(old_dentry)); dir_i->i_mtime = dir_i->i_ctime = ITIME(now); - ihold(old_dentry->d_inode); + ihold(d_inode(old_dentry)); } return ret; } @@ -585,7 +585,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) { struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); - struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); + struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry)); struct jffs2_full_dirent *fd; int ret; uint32_t now = get_seconds(); @@ -599,7 +599,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) dentry->d_name.len, f, now); if (!ret) { dir_i->i_mtime = dir_i->i_ctime = ITIME(now); - 
clear_nlink(dentry->d_inode); + clear_nlink(d_inode(dentry)); drop_nlink(dir_i); } return ret; @@ -770,8 +770,8 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, * the VFS can't check whether the victim is empty. The filesystem * needs to do that for itself. */ - if (new_dentry->d_inode) { - victim_f = JFFS2_INODE_INFO(new_dentry->d_inode); + if (d_really_is_positive(new_dentry)) { + victim_f = JFFS2_INODE_INFO(d_inode(new_dentry)); if (d_is_dir(new_dentry)) { struct jffs2_full_dirent *fd; @@ -794,12 +794,12 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, /* Make a hard link */ /* XXX: This is ugly */ - type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; + type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12; if (!type) type = DT_REG; now = get_seconds(); ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), - old_dentry->d_inode->i_ino, type, + d_inode(old_dentry)->i_ino, type, new_dentry->d_name.name, new_dentry->d_name.len, now); if (ret) @@ -808,9 +808,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, if (victim_f) { /* There was a victim. Kill it off nicely */ if (d_is_dir(new_dentry)) - clear_nlink(new_dentry->d_inode); + clear_nlink(d_inode(new_dentry)); else - drop_nlink(new_dentry->d_inode); + drop_nlink(d_inode(new_dentry)); /* Don't oops if the victim was a dirent pointing to an inode which didn't exist. */ if (victim_f->inocache) { @@ -836,9 +836,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, if (ret) { /* Oh shit. We really ought to make a single node which can do both atomically */ - struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); + struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry)); mutex_lock(&f->sem); - inc_nlink(old_dentry->d_inode); + inc_nlink(d_inode(old_dentry)); if (f->inocache && !d_is_dir(old_dentry)) f->inocache->pino_nlink++; mutex_unlock(&f->sem); @@ -846,8 +846,8 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, pr_notice("%s(): Link succeeded, unlink failed (err %d). 
You now have a hard link\n", __func__, ret); /* Might as well let the VFS know */ - d_instantiate(new_dentry, old_dentry->d_inode); - ihold(old_dentry->d_inode); + d_instantiate(new_dentry, d_inode(old_dentry)); + ihold(d_inode(old_dentry)); new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now); return ret; } diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 64989ca9..f509f62 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -51,9 +51,7 @@ const struct file_operations jffs2_file_operations = { .llseek = generic_file_llseek, .open = generic_file_open, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .unlocked_ioctl=jffs2_ioctl, .mmap = generic_file_readonly_mmap, diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 601afd1..fe5ea08 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -190,7 +190,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int rc; rc = inode_change_ok(inode, iattr); diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c index aca97f3..d4b43fb 100644 --- a/fs/jffs2/security.c +++ b/fs/jffs2/security.c @@ -54,7 +54,7 @@ static int jffs2_security_getxattr(struct dentry *dentry, const char *name, if (!strcmp(name, "")) return -EINVAL; - return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY, + return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY, name, buffer, size); } @@ -64,7 +64,7 @@ static int jffs2_security_setxattr(struct dentry *dentry, const char *name, if (!strcmp(name, "")) return -EINVAL; - return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY, + return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY, name, buffer, size, flags); } diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 3d76f28..d86c5e3 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -140,14 +140,14 @@ static struct dentry *jffs2_get_parent(struct dentry *child) BUG_ON(!d_is_dir(child)); - f = JFFS2_INODE_INFO(child->d_inode); + f = JFFS2_INODE_INFO(d_inode(child)); pino = f->inocache->pino_nlink; JFFS2_DEBUG("Parent of directory ino #%u is #%u\n", f->inocache->ino, pino); - return d_obtain_alias(jffs2_iget(child->d_inode->i_sb, pino)); + return d_obtain_alias(jffs2_iget(d_inode(child)->i_sb, pino)); } static const struct export_operations jffs2_export_ops = { diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c index c7c77b0..1fefa25 100644 --- a/fs/jffs2/symlink.c +++ b/fs/jffs2/symlink.c @@ -31,7 +31,7 @@ const struct inode_operations jffs2_symlink_inode_operations = static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); + struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry)); char *p = (char *)f->target; /* diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index d72817a..f092fee 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c @@ -195,7 +195,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat /* unchecked xdatum is chained with c->xattr_unchecked */ list_del_init(&xd->xindex); - dbg_xattr("success on verfying xdatum (xid=%u, version=%u)\n", + dbg_xattr("success on verifying xdatum (xid=%u, version=%u)\n", xd->xid, xd->version); return 0; @@ -960,7 +960,7 @@ static const struct xattr_handler *xprefix_to_handler(int xprefix) { ssize_t jffs2_listxattr(struct dentry *dentry, 
char *buffer, size_t size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_inode_cache *ic = f->inocache; @@ -1266,7 +1266,6 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ if (rc) { JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n", __func__, rc, totlen); - rc = rc ? rc : -EBADFD; goto out; } rc = save_xattr_ref(c, ref); diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c index 1c86819..ceaf9c6 100644 --- a/fs/jffs2/xattr_trusted.c +++ b/fs/jffs2/xattr_trusted.c @@ -21,7 +21,7 @@ static int jffs2_trusted_getxattr(struct dentry *dentry, const char *name, { if (!strcmp(name, "")) return -EINVAL; - return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, + return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED, name, buffer, size); } @@ -30,7 +30,7 @@ static int jffs2_trusted_setxattr(struct dentry *dentry, const char *name, { if (!strcmp(name, "")) return -EINVAL; - return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, + return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags); } diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c index 916b5c9..a71391e 100644 --- a/fs/jffs2/xattr_user.c +++ b/fs/jffs2/xattr_user.c @@ -21,7 +21,7 @@ static int jffs2_user_getxattr(struct dentry *dentry, const char *name, { if (!strcmp(name, "")) return -EINVAL; - return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_USER, + return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_USER, name, buffer, size); } @@ -30,7 +30,7 @@ static int jffs2_user_setxattr(struct dentry *dentry, const char *name, { if (!strcmp(name, "")) return -EINVAL; - return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_USER, + return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_USER, name, buffer, size, flags); } diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 10815f8..e98d39d 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -100,7 +100,7 @@ static int jfs_release(struct inode *inode, struct file *file) int jfs_setattr(struct dentry *dentry, struct iattr *iattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int rc; rc = inode_change_ok(inode, iattr); @@ -151,8 +151,6 @@ const struct inode_operations jfs_file_inode_operations = { const struct file_operations jfs_file_operations = { .open = jfs_open, .llseek = generic_file_llseek, - .write = new_sync_write, - .read = new_sync_read, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index bd3df1c..070dc4b 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -22,8 +22,8 @@ #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/quotaops.h> +#include <linux/uio.h> #include <linux/writeback.h> -#include <linux/aio.h> #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_filsys.h" @@ -330,8 +330,8 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, jfs_get_block); } -static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -339,13 +339,13 @@ static ssize_t 
jfs_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. */ - if (unlikely((rw & WRITE) && ret < 0)) { + if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = offset + count; diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 49ba7ff..16a0922 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -183,30 +183,23 @@ static inline void remove_metapage(struct page *page, struct metapage *mp) #endif -static void init_once(void *foo) -{ - struct metapage *mp = (struct metapage *)foo; - - mp->lid = 0; - mp->lsn = 0; - mp->flag = 0; - mp->data = NULL; - mp->clsn = 0; - mp->log = NULL; - set_bit(META_free, &mp->flag); - init_waitqueue_head(&mp->wait); -} - static inline struct metapage *alloc_metapage(gfp_t gfp_mask) { - return mempool_alloc(metapage_mempool, gfp_mask); + struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask); + + if (mp) { + mp->lid = 0; + mp->lsn = 0; + mp->data = NULL; + mp->clsn = 0; + mp->log = NULL; + init_waitqueue_head(&mp->wait); + } + return mp; } static inline void free_metapage(struct metapage *mp) { - mp->flag = 0; - set_bit(META_free, &mp->flag); - mempool_free(mp, metapage_mempool); } @@ -216,7 +209,7 @@ int __init metapage_init(void) * Allocate the metapage structures */ metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage), - 0, 0, init_once); + 0, 0, NULL); if (metapage_cache == NULL) return -ENOMEM; diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h index a78beda..337e9e5 100644 --- a/fs/jfs/jfs_metapage.h +++ b/fs/jfs/jfs_metapage.h @@ -48,7 +48,6 @@ struct metapage { /* metapage flag */ #define META_locked 0 -#define META_free 1 #define META_dirty 2 #define META_sync 3 #define META_discard 4 diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 38fdc53..66db7bc 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -346,7 +346,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) { int rc; tid_t tid; /* transaction id */ - struct inode *ip = dentry->d_inode; + struct inode *ip = d_inode(dentry); ino_t ino; struct component_name dname; struct inode *iplist[2]; @@ -472,7 +472,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) { int rc; tid_t tid; /* transaction id */ - struct inode *ip = dentry->d_inode; + struct inode *ip = d_inode(dentry); ino_t ino; struct component_name dname; /* object name */ struct inode *iplist[2]; @@ -791,7 +791,7 @@ static int jfs_link(struct dentry *old_dentry, { int rc; tid_t tid; - struct inode *ip = old_dentry->d_inode; + struct inode *ip = d_inode(old_dentry); ino_t ino; struct component_name dname; struct btstack btstack; @@ -879,7 +879,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, struct component_name dname; int ssize; /* source pathname size */ struct btstack btstack; - struct inode *ip = dentry->d_inode; + struct inode *ip = d_inode(dentry); unchar *i_fastsymlink; s64 xlen = 0; int bmask = 0, xsize; @@ -1086,8 +1086,8 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, dquot_initialize(old_dir); dquot_initialize(new_dir); - old_ip = old_dentry->d_inode; - new_ip = new_dentry->d_inode; + old_ip = d_inode(old_dentry); + new_ip = d_inode(new_dentry); if ((rc = 
get_UCSname(&old_dname, old_dentry))) goto out1; @@ -1500,9 +1500,9 @@ struct dentry *jfs_get_parent(struct dentry *dentry) unsigned long parent_ino; parent_ino = - le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot); + le32_to_cpu(JFS_IP(d_inode(dentry))->i_dtroot.header.idotdot); - return d_obtain_alias(jfs_iget(dentry->d_inode->i_sb, parent_ino)); + return d_obtain_alias(jfs_iget(d_inode(dentry)->i_sb, parent_ino)); } const struct inode_operations jfs_dir_inode_operations = { @@ -1578,7 +1578,7 @@ static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags) * positive dentry isn't good idea. So it's unsupported like * rename("filename", "FILENAME") for now. */ - if (dentry->d_inode) + if (d_really_is_positive(dentry)) return 1; /* diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 5d30c56..4cd9798f 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -102,7 +102,7 @@ void jfs_error(struct super_block *sb, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &args; - pr_err("ERROR: (device %s): %pf: %pV\n", + pr_err("ERROR: (device %s): %ps: %pV\n", sb->s_id, __builtin_return_address(0), &vaf); va_end(args); diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c index 205b946..80f42bc 100644 --- a/fs/jfs/symlink.c +++ b/fs/jfs/symlink.c @@ -24,7 +24,7 @@ static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd) { - char *s = JFS_IP(dentry->d_inode)->i_inline; + char *s = JFS_IP(d_inode(dentry))->i_inline; nd_set_link(nd, s); return NULL; } diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 46325d5..48b15a6 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -849,7 +849,7 @@ int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name, int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t value_len, int flags) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct jfs_inode_info *ji = JFS_IP(inode); int rc; tid_t tid; @@ -872,7 +872,7 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, tid = txBegin(inode->i_sb, 0); mutex_lock(&ji->commit_mutex); - rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len, + rc = __jfs_setxattr(tid, d_inode(dentry), name, value, value_len, flags); if (!rc) rc = txCommit(tid, 1, &inode, 0); @@ -959,7 +959,7 @@ ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data, return -EOPNOTSUPP; } - err = __jfs_getxattr(dentry->d_inode, name, data, buf_size); + err = __jfs_getxattr(d_inode(dentry), name, data, buf_size); return err; } @@ -976,7 +976,7 @@ static inline int can_list(struct jfs_ea *ea) ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); char *buffer; ssize_t size = 0; int xattr_size; @@ -1029,7 +1029,7 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size) int jfs_removexattr(struct dentry *dentry, const char *name) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct jfs_inode_info *ji = JFS_IP(inode); int rc; tid_t tid; @@ -1047,7 +1047,7 @@ int jfs_removexattr(struct dentry *dentry, const char *name) tid = txBegin(inode->i_sb, 0); mutex_lock(&ji->commit_mutex); - rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); + rc = __jfs_setxattr(tid, d_inode(dentry), name, NULL, 0, XATTR_REPLACE); if (!rc) rc = txCommit(tid, 1, &inode, 0); txEnd(tid); diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 6acc964..f131fc2 100644 
--- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -444,7 +444,7 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags) return -ECHILD; /* Always perform fresh lookup for negatives */ - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) goto out_bad_unlocked; kn = dentry->d_fsdata; diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index 9000874..2da8493 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -111,7 +111,7 @@ int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr) int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct kernfs_node *kn = dentry->d_fsdata; int error; @@ -172,11 +172,11 @@ int kernfs_iop_setxattr(struct dentry *dentry, const char *name, if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) { const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; - error = security_inode_setsecurity(dentry->d_inode, suffix, + error = security_inode_setsecurity(d_inode(dentry), suffix, value, size, flags); if (error) return error; - error = security_inode_getsecctx(dentry->d_inode, + error = security_inode_getsecctx(d_inode(dentry), &secdata, &secdata_len); if (error) return error; @@ -271,7 +271,7 @@ int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct kernfs_node *kn = dentry->d_fsdata; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); mutex_lock(&kernfs_mutex); kernfs_refresh_inode(kn, inode); @@ -22,13 +22,13 @@ static inline int simple_positive(struct dentry *dentry) { - return dentry->d_inode && !d_unhashed(dentry); + return d_really_is_positive(dentry) && !d_unhashed(dentry); } int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); generic_fillattr(inode, stat); stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9); return 0; @@ -94,7 +94,7 @@ EXPORT_SYMBOL(dcache_dir_close); loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) { struct dentry *dentry = file->f_path.dentry; - mutex_lock(&dentry->d_inode->i_mutex); + mutex_lock(&d_inode(dentry)->i_mutex); switch (whence) { case 1: offset += file->f_pos; @@ -102,7 +102,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) if (offset >= 0) break; default: - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); return -EINVAL; } if (offset != file->f_pos) { @@ -129,7 +129,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) spin_unlock(&dentry->d_lock); } } - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); return offset; } EXPORT_SYMBOL(dcache_dir_lseek); @@ -169,7 +169,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx) spin_unlock(&next->d_lock); spin_unlock(&dentry->d_lock); if (!dir_emit(ctx, next->d_name.name, next->d_name.len, - next->d_inode->i_ino, dt_type(next->d_inode))) + d_inode(next)->i_ino, dt_type(d_inode(next)))) return 0; spin_lock(&dentry->d_lock); spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); @@ -270,7 +270,7 @@ EXPORT_SYMBOL(simple_open); int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; inc_nlink(inode); @@ -304,7 
+304,7 @@ EXPORT_SYMBOL(simple_empty); int simple_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; drop_nlink(inode); @@ -318,7 +318,7 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry) if (!simple_empty(dentry)) return -ENOTEMPTY; - drop_nlink(dentry->d_inode); + drop_nlink(d_inode(dentry)); simple_unlink(dir, dentry); drop_nlink(dir); return 0; @@ -328,16 +328,16 @@ EXPORT_SYMBOL(simple_rmdir); int simple_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); int they_are_dirs = d_is_dir(old_dentry); if (!simple_empty(new_dentry)) return -ENOTEMPTY; - if (new_dentry->d_inode) { + if (d_really_is_positive(new_dentry)) { simple_unlink(new_dir, new_dentry); if (they_are_dirs) { - drop_nlink(new_dentry->d_inode); + drop_nlink(d_inode(new_dentry)); drop_nlink(old_dir); } } else if (they_are_dirs) { @@ -368,7 +368,7 @@ EXPORT_SYMBOL(simple_rename); */ int simple_setattr(struct dentry *dentry, struct iattr *iattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; error = inode_change_ok(inode, iattr); diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 665ef5a..a563ddb 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -31,7 +31,7 @@ static struct hlist_head nlm_files[FILE_NRHASH]; static DEFINE_MUTEX(nlm_file_mutex); -#ifdef NFSD_DEBUG +#ifdef CONFIG_SUNRPC_DEBUG static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) { u32 *fhp = (u32*)f->data; @@ -203,11 +203,11 @@ static struct kmem_cache *flctx_cache __read_mostly; static struct kmem_cache *filelock_cache __read_mostly; static struct file_lock_context * -locks_get_lock_context(struct inode *inode) +locks_get_lock_context(struct inode *inode, int type) { struct file_lock_context *new; - if (likely(inode->i_flctx)) + if (likely(inode->i_flctx) || type == F_UNLCK) goto out; new = kmem_cache_alloc(flctx_cache, GFP_KERNEL); @@ -223,14 +223,7 @@ locks_get_lock_context(struct inode *inode) * Assign the pointer if it's not already assigned. If it is, then * free the context we just allocated. 
*/ - spin_lock(&inode->i_lock); - if (likely(!inode->i_flctx)) { - inode->i_flctx = new; - new = NULL; - } - spin_unlock(&inode->i_lock); - - if (new) + if (cmpxchg(&inode->i_flctx, NULL, new)) kmem_cache_free(flctx_cache, new); out: return inode->i_flctx; @@ -276,8 +269,10 @@ void locks_release_private(struct file_lock *fl) } if (fl->fl_lmops) { - if (fl->fl_lmops->lm_put_owner) - fl->fl_lmops->lm_put_owner(fl); + if (fl->fl_lmops->lm_put_owner) { + fl->fl_lmops->lm_put_owner(fl->fl_owner); + fl->fl_owner = NULL; + } fl->fl_lmops = NULL; } } @@ -333,7 +328,7 @@ void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) if (fl->fl_lmops) { if (fl->fl_lmops->lm_get_owner) - fl->fl_lmops->lm_get_owner(new, fl); + fl->fl_lmops->lm_get_owner(fl->fl_owner); } } EXPORT_SYMBOL(locks_copy_conflock); @@ -592,11 +587,15 @@ posix_owner_key(struct file_lock *fl) static void locks_insert_global_blocked(struct file_lock *waiter) { + lockdep_assert_held(&blocked_lock_lock); + hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter)); } static void locks_delete_global_blocked(struct file_lock *waiter) { + lockdep_assert_held(&blocked_lock_lock); + hash_del(&waiter->fl_link); } @@ -730,7 +729,7 @@ static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *s /* POSIX locks owned by the same process do not conflict with * each other. */ - if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl)) + if (posix_same_owner(caller_fl, sys_fl)) return (0); /* Check whether they overlap */ @@ -748,7 +747,7 @@ static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *s /* FLOCK locks referring to the same filp do not conflict with * each other. */ - if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file)) + if (caller_fl->fl_file == sys_fl->fl_file) return (0); if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND)) return 0; @@ -838,6 +837,8 @@ static int posix_locks_deadlock(struct file_lock *caller_fl, { int i = 0; + lockdep_assert_held(&blocked_lock_lock); + /* * This deadlock detector can't reasonably detect deadlocks with * FL_OFDLCK locks, since they aren't owned by a process, per-se. @@ -871,9 +872,12 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) bool found = false; LIST_HEAD(dispose); - ctx = locks_get_lock_context(inode); - if (!ctx) - return -ENOMEM; + ctx = locks_get_lock_context(inode, request->fl_type); + if (!ctx) { + if (request->fl_type != F_UNLCK) + return -ENOMEM; + return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0; + } if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) { new_fl = locks_alloc_lock(); @@ -939,9 +943,9 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str bool added = false; LIST_HEAD(dispose); - ctx = locks_get_lock_context(inode); + ctx = locks_get_lock_context(inode, request->fl_type); if (!ctx) - return -ENOMEM; + return (request->fl_type == F_UNLCK) ? 
0 : -ENOMEM; /* * We may need two file_lock structures for this operation, @@ -964,8 +968,6 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str */ if (request->fl_type != F_UNLCK) { list_for_each_entry(fl, &ctx->flc_posix, fl_list) { - if (!IS_POSIX(fl)) - continue; if (!posix_locks_conflict(request, fl)) continue; if (conflock) @@ -1388,9 +1390,8 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker) int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) { int error = 0; - struct file_lock *new_fl; struct file_lock_context *ctx = inode->i_flctx; - struct file_lock *fl; + struct file_lock *new_fl, *fl, *tmp; unsigned long break_time; int want_write = (mode & O_ACCMODE) != O_RDONLY; LIST_HEAD(dispose); @@ -1420,7 +1421,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) break_time++; /* so that 0 means no break time */ } - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { if (!leases_conflict(fl, new_fl)) continue; if (want_write) { @@ -1606,7 +1607,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr lease = *flp; trace_generic_add_lease(inode, lease); - ctx = locks_get_lock_context(inode); + /* Note that arg is never F_UNLCK here */ + ctx = locks_get_lock_context(inode, arg); if (!ctx) return -ENOMEM; @@ -2556,15 +2558,10 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ "); } if (inode) { -#ifdef WE_CAN_BREAK_LSLK_NOW - seq_printf(f, "%d %s:%ld ", fl_pid, - inode->i_sb->s_id, inode->i_ino); -#else - /* userspace relies on this representation of dev_t ;-( */ + /* userspace relies on this representation of dev_t */ seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); -#endif } else { seq_printf(f, "%d <none>:0 ", fl_pid); } @@ -2593,6 +2590,44 @@ static int locks_show(struct seq_file *f, void *v) return 0; } +static void __show_fd_locks(struct seq_file *f, + struct list_head *head, int *id, + struct file *filp, struct files_struct *files) +{ + struct file_lock *fl; + + list_for_each_entry(fl, head, fl_list) { + + if (filp != fl->fl_file) + continue; + if (fl->fl_owner != files && + fl->fl_owner != filp) + continue; + + (*id)++; + seq_puts(f, "lock:\t"); + lock_get_status(f, fl, *id, ""); + } +} + +void show_fd_locks(struct seq_file *f, + struct file *filp, struct files_struct *files) +{ + struct inode *inode = file_inode(filp); + struct file_lock_context *ctx; + int id = 0; + + ctx = inode->i_flctx; + if (!ctx) + return; + + spin_lock(&ctx->flc_lock); + __show_fd_locks(f, &ctx->flc_flock, &id, filp, files); + __show_fd_locks(f, &ctx->flc_posix, &id, filp, files); + __show_fd_locks(f, &ctx->flc_lease, &id, filp, files); + spin_unlock(&ctx->flc_lock); +} + static void *locks_start(struct seq_file *f, loff_t *pos) __acquires(&blocked_lock_lock) { diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 6bdc347..4cf38f1 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -213,7 +213,7 @@ static void abort_transaction(struct inode *inode, struct logfs_transaction *ta) static int logfs_unlink(struct inode *dir, struct dentry *dentry) { struct logfs_super *super = logfs_super(dir->i_sb); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct logfs_transaction *ta; struct page *page; pgoff_t index; @@ -271,7 +271,7 @@ static inline int 
logfs_empty_dir(struct inode *dir) static int logfs_rmdir(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (!logfs_empty_dir(inode)) return -ENOTEMPTY; @@ -537,7 +537,7 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry, static int logfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; ihold(inode); @@ -607,7 +607,7 @@ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry, /* 2. write target dd */ mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(new_dir, ta); - err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode); + err = logfs_write_dir(new_dir, new_dentry, d_inode(old_dentry)); if (!err) err = write_inode(new_dir); @@ -658,8 +658,8 @@ static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct logfs_super *super = logfs_super(old_dir->i_sb); - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); int isdir = S_ISDIR(old_inode->i_mode); struct logfs_disk_dentry dd; struct logfs_transaction *ta; @@ -719,7 +719,7 @@ out: static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - if (new_dentry->d_inode) + if (d_really_is_positive(new_dentry)) return logfs_rename_target(old_dir, old_dentry, new_dir, new_dentry); return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry); diff --git a/fs/logfs/file.c b/fs/logfs/file.c index 8538752..1a6f016 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c @@ -241,7 +241,7 @@ int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) static int logfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int err = 0; err = inode_change_ok(inode, attr); @@ -271,8 +271,6 @@ const struct file_operations logfs_reg_fops = { .llseek = generic_file_llseek, .mmap = generic_file_readonly_mmap, .open = generic_file_open, - .read = new_sync_read, - .write = new_sync_write, }; const struct address_space_operations logfs_reg_aops = { diff --git a/fs/minix/dir.c b/fs/minix/dir.c index dfaf6fa..118e4e7 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -156,7 +156,7 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page) { const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; - struct inode * dir = dentry->d_parent->d_inode; + struct inode * dir = d_inode(dentry->d_parent); struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); unsigned long n; @@ -203,7 +203,7 @@ found: int minix_add_link(struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct super_block * sb = dir->i_sb; diff --git a/fs/minix/file.c b/fs/minix/file.c index a967de0..94f0eb9a 100644 --- a/fs/minix/file.c +++ b/fs/minix/file.c @@ -14,9 +14,7 @@ */ const struct file_operations minix_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = 
generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .fsync = generic_file_fsync, @@ -25,7 +23,7 @@ const struct file_operations minix_file_operations = { static int minix_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; error = inode_change_ok(inode, attr); diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 3f57af1..1182d1e 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -626,8 +626,8 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct super_block *sb = dentry->d_sb; - generic_fillattr(dentry->d_inode, stat); - if (INODE_VERSION(dentry->d_inode) == MINIX_V1) + generic_fillattr(d_inode(dentry), stat); + if (INODE_VERSION(d_inode(dentry)) == MINIX_V1) stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb); else stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb); diff --git a/fs/minix/namei.c b/fs/minix/namei.c index cd950e2..a795a11 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -104,7 +104,7 @@ out_fail: static int minix_link(struct dentry * old_dentry, struct inode * dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); @@ -151,7 +151,7 @@ out_dir: static int minix_unlink(struct inode * dir, struct dentry *dentry) { int err = -ENOENT; - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); struct page * page; struct minix_dir_entry * de; @@ -171,7 +171,7 @@ end_unlink: static int minix_rmdir(struct inode * dir, struct dentry *dentry) { - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); int err = -ENOTEMPTY; if (minix_empty_dir(inode)) { @@ -187,8 +187,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, struct inode * new_dir, struct dentry *new_dentry) { - struct inode * old_inode = old_dentry->d_inode; - struct inode * new_inode = new_dentry->d_inode; + struct inode * old_inode = d_inode(old_dentry); + struct inode * new_inode = d_inode(new_dentry); struct page * dir_page = NULL; struct minix_dir_entry * dir_de = NULL; struct page * old_page; @@ -119,15 +119,14 @@ * PATH_MAX includes the nul terminator --RR. 
*/ -#define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename)) +#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname)) struct filename * getname_flags(const char __user *filename, int flags, int *empty) { - struct filename *result, *err; - int len; - long max; + struct filename *result; char *kname; + int len; result = audit_reusename(filename); if (result) @@ -136,22 +135,18 @@ getname_flags(const char __user *filename, int flags, int *empty) result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); - result->refcnt = 1; /* * First, try to embed the struct filename inside the names_cache * allocation */ - kname = (char *)result + sizeof(*result); + kname = (char *)result->iname; result->name = kname; - result->separate = false; - max = EMBEDDED_NAME_MAX; -recopy: - len = strncpy_from_user(kname, filename, max); + len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX); if (unlikely(len < 0)) { - err = ERR_PTR(len); - goto error; + __putname(result); + return ERR_PTR(len); } /* @@ -160,43 +155,49 @@ recopy: * names_cache allocation for the pathname, and re-do the copy from * userland. */ - if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) { + if (unlikely(len == EMBEDDED_NAME_MAX)) { + const size_t size = offsetof(struct filename, iname[1]); kname = (char *)result; - result = kzalloc(sizeof(*result), GFP_KERNEL); - if (!result) { - err = ERR_PTR(-ENOMEM); - result = (struct filename *)kname; - goto error; + /* + * size is chosen that way we to guarantee that + * result->iname[0] is within the same object and that + * kname can't be equal to result->iname, no matter what. + */ + result = kzalloc(size, GFP_KERNEL); + if (unlikely(!result)) { + __putname(kname); + return ERR_PTR(-ENOMEM); } result->name = kname; - result->separate = true; - result->refcnt = 1; - max = PATH_MAX; - goto recopy; + len = strncpy_from_user(kname, filename, PATH_MAX); + if (unlikely(len < 0)) { + __putname(kname); + kfree(result); + return ERR_PTR(len); + } + if (unlikely(len == PATH_MAX)) { + __putname(kname); + kfree(result); + return ERR_PTR(-ENAMETOOLONG); + } } + result->refcnt = 1; /* The empty path is special. 
*/ if (unlikely(!len)) { if (empty) *empty = 1; - err = ERR_PTR(-ENOENT); - if (!(flags & LOOKUP_EMPTY)) - goto error; + if (!(flags & LOOKUP_EMPTY)) { + putname(result); + return ERR_PTR(-ENOENT); + } } - err = ERR_PTR(-ENAMETOOLONG); - if (unlikely(len >= PATH_MAX)) - goto error; - result->uptr = filename; result->aname = NULL; audit_getname(result); return result; - -error: - putname(result); - return err; } struct filename * @@ -216,8 +217,7 @@ getname_kernel(const char * filename) return ERR_PTR(-ENOMEM); if (len <= EMBEDDED_NAME_MAX) { - result->name = (char *)(result) + sizeof(*result); - result->separate = false; + result->name = (char *)result->iname; } else if (len <= PATH_MAX) { struct filename *tmp; @@ -227,7 +227,6 @@ getname_kernel(const char * filename) return ERR_PTR(-ENOMEM); } tmp->name = (char *)result; - tmp->separate = true; result = tmp; } else { __putname(result); @@ -249,7 +248,7 @@ void putname(struct filename *name) if (--name->refcnt > 0) return; - if (name->separate) { + if (name->name != name->iname) { __putname(name->name); kfree(name); } else @@ -1586,12 +1585,13 @@ static inline int walk_component(struct nameidata *nd, struct path *path, inode = path->dentry->d_inode; } err = -ENOENT; - if (!inode || d_is_negative(path->dentry)) + if (d_is_negative(path->dentry)) goto out_path_put; if (should_follow_link(path->dentry, follow)) { if (nd->flags & LOOKUP_RCU) { - if (unlikely(unlazy_walk(nd, path->dentry))) { + if (unlikely(nd->path.mnt != path->mnt || + unlazy_walk(nd, path->dentry))) { err = -ECHILD; goto out_err; } @@ -1851,10 +1851,11 @@ static int link_path_walk(const char *name, struct nameidata *nd) return err; } -static int path_init(int dfd, const char *name, unsigned int flags, +static int path_init(int dfd, const struct filename *name, unsigned int flags, struct nameidata *nd) { int retval = 0; + const char *s = name->name; nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; @@ -1863,7 +1864,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (flags & LOOKUP_ROOT) { struct dentry *root = nd->root.dentry; struct inode *inode = root->d_inode; - if (*name) { + if (*s) { if (!d_can_lookup(root)) return -ENOTDIR; retval = inode_permission(inode, MAY_EXEC); @@ -1885,7 +1886,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, nd->root.mnt = NULL; nd->m_seq = read_seqbegin(&mount_lock); - if (*name=='/') { + if (*s == '/') { if (flags & LOOKUP_RCU) { rcu_read_lock(); nd->seq = set_root_rcu(nd); @@ -1919,7 +1920,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, dentry = f.file->f_path.dentry; - if (*name) { + if (*s) { if (!d_can_lookup(dentry)) { fdput(f); return -ENOTDIR; @@ -1949,7 +1950,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, return -ECHILD; done: current->total_link_count = 0; - return link_path_walk(name, nd); + return link_path_walk(s, nd); } static void path_cleanup(struct nameidata *nd) @@ -1972,7 +1973,7 @@ static inline int lookup_last(struct nameidata *nd, struct path *path) } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. 
*/ -static int path_lookupat(int dfd, const char *name, +static int path_lookupat(int dfd, const struct filename *name, unsigned int flags, struct nameidata *nd) { struct path path; @@ -2027,31 +2028,17 @@ static int path_lookupat(int dfd, const char *name, static int filename_lookup(int dfd, struct filename *name, unsigned int flags, struct nameidata *nd) { - int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd); + int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd); if (unlikely(retval == -ECHILD)) - retval = path_lookupat(dfd, name->name, flags, nd); + retval = path_lookupat(dfd, name, flags, nd); if (unlikely(retval == -ESTALE)) - retval = path_lookupat(dfd, name->name, - flags | LOOKUP_REVAL, nd); + retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd); if (likely(!retval)) audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT); return retval; } -static int do_path_lookup(int dfd, const char *name, - unsigned int flags, struct nameidata *nd) -{ - struct filename *filename = getname_kernel(name); - int retval = PTR_ERR(filename); - - if (!IS_ERR(filename)) { - retval = filename_lookup(dfd, filename, flags, nd); - putname(filename); - } - return retval; -} - /* does lookup, returns the object with parent locked */ struct dentry *kern_path_locked(const char *name, struct path *path) { @@ -2089,9 +2076,15 @@ out: int kern_path(const char *name, unsigned int flags, struct path *path) { struct nameidata nd; - int res = do_path_lookup(AT_FDCWD, name, flags, &nd); - if (!res) - *path = nd.path; + struct filename *filename = getname_kernel(name); + int res = PTR_ERR(filename); + + if (!IS_ERR(filename)) { + res = filename_lookup(AT_FDCWD, filename, flags, &nd); + putname(filename); + if (!res) + *path = nd.path; + } return res; } EXPORT_SYMBOL(kern_path); @@ -2108,15 +2101,22 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, const char *name, unsigned int flags, struct path *path) { - struct nameidata nd; - int err; - nd.root.dentry = dentry; - nd.root.mnt = mnt; + struct filename *filename = getname_kernel(name); + int err = PTR_ERR(filename); + BUG_ON(flags & LOOKUP_PARENT); - /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */ - err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd); - if (!err) - *path = nd.path; + + /* the first argument of filename_lookup() is ignored with LOOKUP_ROOT */ + if (!IS_ERR(filename)) { + struct nameidata nd; + nd.root.dentry = dentry; + nd.root.mnt = mnt; + err = filename_lookup(AT_FDCWD, filename, + flags | LOOKUP_ROOT, &nd); + if (!err) + *path = nd.path; + putname(filename); + } return err; } EXPORT_SYMBOL(vfs_path_lookup); @@ -2138,9 +2138,7 @@ static struct dentry *lookup_hash(struct nameidata *nd) * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should - * not be called by generic code. Also note that by using this function the - * nameidata argument is passed to the filesystem methods and a filesystem - * using this helper needs to be prepared for that. + * not be called by generic code. 
*/ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) { @@ -2313,7 +2311,7 @@ mountpoint_last(struct nameidata *nd, struct path *path) mutex_unlock(&dir->d_inode->i_mutex); done: - if (!dentry->d_inode || d_is_negative(dentry)) { + if (d_is_negative(dentry)) { error = -ENOENT; dput(dentry); goto out; @@ -2341,7 +2339,8 @@ out: * Returns 0 and "path" will be valid on success; Returns error otherwise. */ static int -path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags) +path_mountpoint(int dfd, const struct filename *name, struct path *path, + unsigned int flags) { struct nameidata nd; int err; @@ -2370,20 +2369,20 @@ out: } static int -filename_mountpoint(int dfd, struct filename *s, struct path *path, +filename_mountpoint(int dfd, struct filename *name, struct path *path, unsigned int flags) { int error; - if (IS_ERR(s)) - return PTR_ERR(s); - error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU); + if (IS_ERR(name)) + return PTR_ERR(name); + error = path_mountpoint(dfd, name, path, flags | LOOKUP_RCU); if (unlikely(error == -ECHILD)) - error = path_mountpoint(dfd, s->name, path, flags); + error = path_mountpoint(dfd, name, path, flags); if (unlikely(error == -ESTALE)) - error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL); + error = path_mountpoint(dfd, name, path, flags | LOOKUP_REVAL); if (likely(!error)) - audit_inode(s, path->dentry, 0); - putname(s); + audit_inode(name, path->dentry, 0); + putname(name); return error; } @@ -3040,14 +3039,15 @@ retry_lookup: finish_lookup: /* we _can_ be in RCU mode here */ error = -ENOENT; - if (!inode || d_is_negative(path->dentry)) { + if (d_is_negative(path->dentry)) { path_to_nameidata(path, nd); goto out; } if (should_follow_link(path->dentry, !symlink_ok)) { if (nd->flags & LOOKUP_RCU) { - if (unlikely(unlazy_walk(nd, path->dentry))) { + if (unlikely(nd->path.mnt != path->mnt || + unlazy_walk(nd, path->dentry))) { error = -ECHILD; goto out; } @@ -3079,7 +3079,7 @@ finish_open: error = -ENOTDIR; if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) goto out; - if (!S_ISREG(nd->inode->i_mode)) + if (!d_is_reg(nd->path.dentry)) will_truncate = false; if (will_truncate) { @@ -3156,7 +3156,7 @@ static int do_tmpfile(int dfd, struct filename *pathname, static const struct qstr name = QSTR_INIT("/", 1); struct dentry *dentry, *child; struct inode *dir; - int error = path_lookupat(dfd, pathname->name, + int error = path_lookupat(dfd, pathname, flags | LOOKUP_DIRECTORY, nd); if (unlikely(error)) return error; @@ -3229,7 +3229,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, goto out; } - error = path_init(dfd, pathname->name, flags, nd); + error = path_init(dfd, pathname, flags, nd); if (unlikely(error)) goto out; diff --git a/fs/namespace.c b/fs/namespace.c index 82ef140..1f4f9da 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) */ struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) { - struct mount *p, *res; - res = p = __lookup_mnt(mnt, dentry); + struct mount *p, *res = NULL; + p = __lookup_mnt(mnt, dentry); if (!p) goto out; + if (!(p->mnt.mnt_flags & MNT_UMOUNT)) + res = p; hlist_for_each_entry_continue(p, mnt_hash) { if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) break; - res = p; + if (!(p->mnt.mnt_flags & MNT_UMOUNT)) + res = p; } out: return res; @@ -795,10 +798,8 @@ static void 
__touch_mnt_namespace(struct mnt_namespace *ns) /* * vfsmount lock must be held for write */ -static void detach_mnt(struct mount *mnt, struct path *old_path) +static void unhash_mnt(struct mount *mnt) { - old_path->dentry = mnt->mnt_mountpoint; - old_path->mnt = &mnt->mnt_parent->mnt; mnt->mnt_parent = mnt; mnt->mnt_mountpoint = mnt->mnt.mnt_root; list_del_init(&mnt->mnt_child); @@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path) /* * vfsmount lock must be held for write */ +static void detach_mnt(struct mount *mnt, struct path *old_path) +{ + old_path->dentry = mnt->mnt_mountpoint; + old_path->mnt = &mnt->mnt_parent->mnt; + unhash_mnt(mnt); +} + +/* + * vfsmount lock must be held for write + */ +static void umount_mnt(struct mount *mnt) +{ + /* old mountpoint will be dropped when we can do that */ + mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint; + unhash_mnt(mnt); +} + +/* + * vfsmount lock must be held for write + */ void mnt_set_mountpoint(struct mount *mnt, struct mountpoint *mp, struct mount *child_mnt) @@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt) rcu_read_unlock(); list_del(&mnt->mnt_instance); + + if (unlikely(!list_empty(&mnt->mnt_mounts))) { + struct mount *p, *tmp; + list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { + umount_mnt(p); + } + } unlock_mount_hash(); if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { @@ -1298,17 +1326,15 @@ static HLIST_HEAD(unmounted); /* protected by namespace_sem */ static void namespace_unlock(void) { - struct hlist_head head = unmounted; + struct hlist_head head; - if (likely(hlist_empty(&head))) { - up_write(&namespace_sem); - return; - } + hlist_move_list(&unmounted, &head); - head.first->pprev = &head.first; - INIT_HLIST_HEAD(&unmounted); up_write(&namespace_sem); + if (likely(hlist_empty(&head))) + return; + synchronize_rcu(); group_pin_kill(&head); @@ -1319,49 +1345,63 @@ static inline void namespace_lock(void) down_write(&namespace_sem); } +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; /* * mount_lock must be held * namespace_sem must be held for write - * how = 0 => just this tree, don't propagate - * how = 1 => propagate; we know that nobody else has reference to any victims - * how = 2 => lazy umount */ -void umount_tree(struct mount *mnt, int how) +static void umount_tree(struct mount *mnt, enum umount_tree_flags how) { - HLIST_HEAD(tmp_list); + LIST_HEAD(tmp_list); struct mount *p; + if (how & UMOUNT_PROPAGATE) + propagate_mount_unlock(mnt); + + /* Gather the mounts to umount */ for (p = mnt; p; p = next_mnt(p, mnt)) { - hlist_del_init_rcu(&p->mnt_hash); - hlist_add_head(&p->mnt_hash, &tmp_list); + p->mnt.mnt_flags |= MNT_UMOUNT; + list_move(&p->mnt_list, &tmp_list); } - hlist_for_each_entry(p, &tmp_list, mnt_hash) + /* Hide the mounts from mnt_mounts */ + list_for_each_entry(p, &tmp_list, mnt_list) { list_del_init(&p->mnt_child); + } - if (how) + /* Add propogated mounts to the tmp_list */ + if (how & UMOUNT_PROPAGATE) propagate_umount(&tmp_list); - while (!hlist_empty(&tmp_list)) { - p = hlist_entry(tmp_list.first, struct mount, mnt_hash); - hlist_del_init_rcu(&p->mnt_hash); + while (!list_empty(&tmp_list)) { + bool disconnect; + p = list_first_entry(&tmp_list, struct mount, mnt_list); list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; - if (how < 2) + if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; - pin_insert_group(&p->mnt_umount, 
&p->mnt_parent->mnt, &unmounted); + disconnect = !(((how & UMOUNT_CONNECTED) && + mnt_has_parent(p) && + (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || + IS_MNT_LOCKED_AND_LAZY(p)); + + pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, + disconnect ? &unmounted : NULL); if (mnt_has_parent(p)) { - hlist_del_init(&p->mnt_mp_list); - put_mountpoint(p->mnt_mp); mnt_add_count(p->mnt_parent, -1); - /* old mountpoint will be dropped when we can do that */ - p->mnt_ex_mountpoint = p->mnt_mountpoint; - p->mnt_mountpoint = p->mnt.mnt_root; - p->mnt_parent = p; - p->mnt_mp = NULL; + if (!disconnect) { + /* Don't forget about p */ + list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); + } else { + umount_mnt(p); + } } change_mnt_propagation(p, MS_PRIVATE); } @@ -1447,14 +1487,14 @@ static int do_umount(struct mount *mnt, int flags) if (flags & MNT_DETACH) { if (!list_empty(&mnt->mnt_list)) - umount_tree(mnt, 2); + umount_tree(mnt, UMOUNT_PROPAGATE); retval = 0; } else { shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { if (!list_empty(&mnt->mnt_list)) - umount_tree(mnt, 1); + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); retval = 0; } } @@ -1480,13 +1520,20 @@ void __detach_mounts(struct dentry *dentry) namespace_lock(); mp = lookup_mountpoint(dentry); - if (!mp) + if (IS_ERR_OR_NULL(mp)) goto out_unlock; lock_mount_hash(); while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); - umount_tree(mnt, 2); + if (mnt->mnt.mnt_flags & MNT_UMOUNT) { + struct mount *p, *tmp; + list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { + hlist_add_head(&p->mnt_umount.s_list, &unmounted); + umount_mnt(p); + } + } + else umount_tree(mnt, UMOUNT_CONNECTED); } unlock_mount_hash(); put_mountpoint(mp); @@ -1648,7 +1695,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, out: if (res) { lock_mount_hash(); - umount_tree(res, 0); + umount_tree(res, UMOUNT_SYNC); unlock_mount_hash(); } return q; @@ -1660,8 +1707,11 @@ struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; namespace_lock(); - tree = copy_tree(real_mount(path->mnt), path->dentry, - CL_COPY_ALL | CL_PRIVATE); + if (!check_mnt(real_mount(path->mnt))) + tree = ERR_PTR(-EINVAL); + else + tree = copy_tree(real_mount(path->mnt), path->dentry, + CL_COPY_ALL | CL_PRIVATE); namespace_unlock(); if (IS_ERR(tree)) return ERR_CAST(tree); @@ -1672,7 +1722,7 @@ void drop_collected_mounts(struct vfsmount *mnt) { namespace_lock(); lock_mount_hash(); - umount_tree(real_mount(mnt), 0); + umount_tree(real_mount(mnt), UMOUNT_SYNC); unlock_mount_hash(); namespace_unlock(); } @@ -1855,7 +1905,7 @@ static int attach_recursive_mnt(struct mount *source_mnt, out_cleanup_ids: while (!hlist_empty(&tree_list)) { child = hlist_entry(tree_list.first, struct mount, mnt_hash); - umount_tree(child, 0); + umount_tree(child, UMOUNT_SYNC); } unlock_mount_hash(); cleanup_group_ids(source_mnt, NULL); @@ -2035,7 +2085,7 @@ static int do_loopback(struct path *path, const char *old_name, err = graft_tree(mnt, parent, mp); if (err) { lock_mount_hash(); - umount_tree(mnt, 0); + umount_tree(mnt, UMOUNT_SYNC); unlock_mount_hash(); } out2: @@ -2406,7 +2456,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) while (!list_empty(&graveyard)) { mnt = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(mnt->mnt_ns); - umount_tree(mnt, 1); + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); } unlock_mount_hash(); namespace_unlock(); @@ -2477,7 +2527,7 @@ 
static void shrink_submounts(struct mount *mnt) m = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(m->mnt_ns); - umount_tree(m, 1); + umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); } } } diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index e7ca827..80021c7 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -127,7 +127,7 @@ static inline int ncp_case_sensitive(const struct inode *i) static int ncp_hash_dentry(const struct dentry *dentry, struct qstr *this) { - struct inode *inode = ACCESS_ONCE(dentry->d_inode); + struct inode *inode = d_inode_rcu(dentry); if (!inode) return 0; @@ -162,7 +162,7 @@ ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry, if (len != name->len) return 1; - pinode = ACCESS_ONCE(parent->d_inode); + pinode = d_inode_rcu(parent); if (!pinode) return 1; @@ -180,7 +180,7 @@ ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry, static int ncp_delete_dentry(const struct dentry * dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (inode) { if (is_bad_inode(inode)) @@ -224,7 +224,7 @@ ncp_force_unlink(struct inode *dir, struct dentry* dentry) memset(&info, 0, sizeof(info)); /* remove the Read-Only flag on the NW server */ - inode = dentry->d_inode; + inode = d_inode(dentry); old_nwattr = NCP_FINFO(inode)->nwattr; info.attributes = old_nwattr & ~(aRONLY|aDELETEINHIBIT|aRENAMEINHIBIT); @@ -254,7 +254,7 @@ ncp_force_rename(struct inode *old_dir, struct dentry* old_dentry, char *_old_na { struct nw_modify_dos_info info; int res=0x90,res2; - struct inode *old_inode = old_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); __le32 old_nwattr = NCP_FINFO(old_inode)->nwattr; __le32 new_nwattr = 0; /* shut compiler warning */ int old_nwattr_changed = 0; @@ -268,8 +268,8 @@ ncp_force_rename(struct inode *old_dir, struct dentry* old_dentry, char *_old_na res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info); if (!res2) old_nwattr_changed = 1; - if (new_dentry && new_dentry->d_inode) { - new_nwattr = NCP_FINFO(new_dentry->d_inode)->nwattr; + if (new_dentry && d_really_is_positive(new_dentry)) { + new_nwattr = NCP_FINFO(d_inode(new_dentry))->nwattr; info.attributes = new_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT); res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info); if (!res2) @@ -324,9 +324,9 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags) return -ECHILD; parent = dget_parent(dentry); - dir = parent->d_inode; + dir = d_inode(parent); - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) goto finished; server = NCP_SERVER(dir); @@ -367,7 +367,7 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags) * what we remember, it's not valid any more. 
*/ if (!res) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); mutex_lock(&inode->i_mutex); if (finfo.i.dirEntNum == NCP_FINFO(inode)->dirEntNum) { @@ -388,7 +388,7 @@ finished: static time_t ncp_obtain_mtime(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ncp_server *server = NCP_SERVER(inode); struct nw_info_struct i; @@ -404,7 +404,7 @@ static time_t ncp_obtain_mtime(struct dentry *dentry) static inline void ncp_invalidate_dircache_entries(struct dentry *parent) { - struct ncp_server *server = NCP_SERVER(parent->d_inode); + struct ncp_server *server = NCP_SERVER(d_inode(parent)); struct dentry *dentry; spin_lock(&parent->d_lock); @@ -418,7 +418,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent) static int ncp_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct page *page = NULL; struct ncp_server *server = NCP_SERVER(inode); union ncp_dir_cache *cache = NULL; @@ -491,13 +491,13 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) goto invalid_cache; } spin_unlock(&dentry->d_lock); - if (!dent->d_inode) { + if (d_really_is_negative(dent)) { dput(dent); goto invalid_cache; } over = !dir_emit(ctx, dent->d_name.name, dent->d_name.len, - dent->d_inode->i_ino, DT_UNKNOWN); + d_inode(dent)->i_ino, DT_UNKNOWN); dput(dent); if (over) goto finished; @@ -571,7 +571,7 @@ static void ncp_d_prune(struct dentry *dentry) { if (!dentry->d_fsdata) /* not referenced from page cache */ return; - NCP_FINFO(dentry->d_parent->d_inode)->flags &= ~NCPI_DIR_CACHE; + NCP_FINFO(d_inode(dentry->d_parent))->flags &= ~NCPI_DIR_CACHE; } static int @@ -580,7 +580,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, int inval_childs) { struct dentry *newdent, *dentry = file->f_path.dentry; - struct inode *dir = dentry->d_inode; + struct inode *dir = d_inode(dentry); struct ncp_cache_control ctl = *ctrl; struct qstr qname; int valid = 0; @@ -621,7 +621,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, dentry_update_name_case(newdent, &qname); } - if (!newdent->d_inode) { + if (d_really_is_negative(newdent)) { struct inode *inode; entry->opened = 0; @@ -637,7 +637,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, spin_unlock(&dentry->d_lock); } } else { - struct inode *inode = newdent->d_inode; + struct inode *inode = d_inode(newdent); mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); ncp_update_inode2(inode, entry); @@ -659,10 +659,10 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, ctl.cache = kmap(ctl.page); } if (ctl.cache) { - if (newdent->d_inode) { + if (d_really_is_positive(newdent)) { newdent->d_fsdata = newdent; ctl.cache->dentry[ctl.idx] = newdent; - ino = newdent->d_inode->i_ino; + ino = d_inode(newdent)->i_ino; ncp_new_dentry(newdent); } valid = 1; @@ -807,7 +807,7 @@ int ncp_conn_logged_in(struct super_block *sb) } dent = sb->s_root; if (dent) { - struct inode* ino = dent->d_inode; + struct inode* ino = d_inode(dent); if (ino) { ncp_update_known_namespace(server, volNumber, NULL); NCP_FINFO(ino)->volNumber = volNumber; @@ -815,7 +815,7 @@ int ncp_conn_logged_in(struct super_block *sb) NCP_FINFO(ino)->DosDirNum = DosDirNum; result = 0; } else { - ncp_dbg(1, "sb->s_root->d_inode == NULL!\n"); + ncp_dbg(1, "d_inode(sb->s_root) == NULL!\n"); } } else { ncp_dbg(1, "sb->s_root == NULL!\n"); @@ -1055,7 
+1055,7 @@ out: static int ncp_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ncp_server *server; int error; diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 1dd7007..011324c 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -98,30 +98,24 @@ out: } static ssize_t -ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +ncp_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { + struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); size_t already_read = 0; - off_t pos; + off_t pos = iocb->ki_pos; size_t bufsize; int error; - void* freepage; + void *freepage; size_t freelen; ncp_dbg(1, "enter %pD2\n", file); - pos = *ppos; - - if ((ssize_t) count < 0) { - return -EINVAL; - } - if (!count) + if (!iov_iter_count(to)) return 0; if (pos > inode->i_sb->s_maxbytes) return 0; - if (pos + count > inode->i_sb->s_maxbytes) { - count = inode->i_sb->s_maxbytes - pos; - } + iov_iter_truncate(to, inode->i_sb->s_maxbytes - pos); error = ncp_make_open(inode, O_RDONLY); if (error) { @@ -138,31 +132,29 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) goto outrel; error = 0; /* First read in as much as possible for each bufsize. */ - while (already_read < count) { + while (iov_iter_count(to)) { int read_this_time; - size_t to_read = min_t(unsigned int, + size_t to_read = min_t(size_t, bufsize - (pos % bufsize), - count - already_read); + iov_iter_count(to)); error = ncp_read_bounce(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, - pos, to_read, buf, &read_this_time, + pos, to_read, to, &read_this_time, freepage, freelen); if (error) { error = -EIO; /* NW errno -> Linux errno */ break; } pos += read_this_time; - buf += read_this_time; already_read += read_this_time; - if (read_this_time != to_read) { + if (read_this_time != to_read) break; - } } vfree(freepage); - *ppos = pos; + iocb->ki_pos = pos; file_accessed(file); @@ -173,42 +165,21 @@ outrel: } static ssize_t -ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { + struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); size_t already_written = 0; - off_t pos; size_t bufsize; int errno; - void* bouncebuffer; + void *bouncebuffer; + off_t pos; ncp_dbg(1, "enter %pD2\n", file); - if ((ssize_t) count < 0) - return -EINVAL; - pos = *ppos; - if (file->f_flags & O_APPEND) { - pos = i_size_read(inode); - } + errno = generic_write_checks(iocb, from); + if (errno <= 0) + return errno; - if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) { - if (pos >= MAX_NON_LFS) { - return -EFBIG; - } - if (count > MAX_NON_LFS - (u32)pos) { - count = MAX_NON_LFS - (u32)pos; - } - } - if (pos >= inode->i_sb->s_maxbytes) { - if (count || pos > inode->i_sb->s_maxbytes) { - return -EFBIG; - } - } - if (pos + count > inode->i_sb->s_maxbytes) { - count = inode->i_sb->s_maxbytes - pos; - } - - if (!count) - return 0; errno = ncp_make_open(inode, O_WRONLY); if (errno) { ncp_dbg(1, "open failed, error=%d\n", errno); @@ -216,8 +187,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t * } bufsize = NCP_SERVER(inode)->buffer_size; - already_written = 0; - errno = file_update_time(file); if (errno) goto outrel; @@ -227,13 +196,14 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t * errno = -EIO; /* -ENOMEM */ goto outrel; 
} - while (already_written < count) { + pos = iocb->ki_pos; + while (iov_iter_count(from)) { int written_this_time; - size_t to_write = min_t(unsigned int, + size_t to_write = min_t(size_t, bufsize - (pos % bufsize), - count - already_written); + iov_iter_count(from)); - if (copy_from_user(bouncebuffer, buf, to_write)) { + if (copy_from_iter(bouncebuffer, to_write, from) != to_write) { errno = -EFAULT; break; } @@ -244,16 +214,14 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t * break; } pos += written_this_time; - buf += written_this_time; already_written += written_this_time; - if (written_this_time != to_write) { + if (written_this_time != to_write) break; - } } vfree(bouncebuffer); - *ppos = pos; + iocb->ki_pos = pos; if (pos > i_size_read(inode)) { mutex_lock(&inode->i_mutex); @@ -277,8 +245,8 @@ static int ncp_release(struct inode *inode, struct file *file) { const struct file_operations ncp_file_operations = { .llseek = generic_file_llseek, - .read = ncp_file_read, - .write = ncp_file_write, + .read_iter = ncp_file_read_iter, + .write_iter = ncp_file_write_iter, .unlocked_ioctl = ncp_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ncp_compat_ioctl, diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 01a9e16..9605a2f 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -812,7 +812,7 @@ static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf) if (!d) { goto dflt; } - i = d->d_inode; + i = d_inode(d); if (!i) { goto dflt; } @@ -865,7 +865,7 @@ dflt:; int ncp_notify_change(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int result = 0; __le32 info_mask; struct nw_modify_dos_info info; @@ -878,7 +878,7 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr) goto out; result = -EPERM; - if (IS_DEADDIR(dentry->d_inode)) + if (IS_DEADDIR(d_inode(dentry))) goto out; /* ageing the dentry to force validation */ diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index cf7e043..79b1130 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -376,7 +376,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg struct dentry* dentry = inode->i_sb->s_root; if (dentry) { - struct inode* s_inode = dentry->d_inode; + struct inode* s_inode = d_inode(dentry); if (s_inode) { sr.volNumber = NCP_FINFO(s_inode)->volNumber; @@ -384,7 +384,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg sr.namespace = server->name_space[sr.volNumber]; result = 0; } else - ncp_dbg(1, "s_root->d_inode==NULL\n"); + ncp_dbg(1, "d_inode(s_root)==NULL\n"); } else ncp_dbg(1, "s_root==NULL\n"); } else { @@ -431,7 +431,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg if (result == 0) { dentry = inode->i_sb->s_root; if (dentry) { - struct inode* s_inode = dentry->d_inode; + struct inode* s_inode = d_inode(dentry); if (s_inode) { NCP_FINFO(s_inode)->volNumber = vnum; @@ -439,7 +439,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg NCP_FINFO(s_inode)->DosDirNum = dosde; server->root_setuped = 1; } else { - ncp_dbg(1, "s_root->d_inode==NULL\n"); + ncp_dbg(1, "d_inode(s_root)==NULL\n"); result = -EIO; } } else { diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index 4823875..88dbbc9 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c @@ -727,7 +727,7 @@ int ncp_del_file_or_subdir2(struct ncp_server *server, struct dentry *dentry) { - 
struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); __u8 volnum; __le32 dirent; @@ -1001,8 +1001,8 @@ out: */ int ncp_read_bounce(struct ncp_server *server, const char *file_id, - __u32 offset, __u16 to_read, char __user *target, int *bytes_read, - void* bounce, __u32 bufsize) + __u32 offset, __u16 to_read, struct iov_iter *to, + int *bytes_read, void *bounce, __u32 bufsize) { int result; @@ -1025,7 +1025,7 @@ ncp_read_bounce(struct ncp_server *server, const char *file_id, (offset & 1); *bytes_read = len; result = 0; - if (copy_to_user(target, source, len)) + if (copy_to_iter(source, len, to) != len) result = -EFAULT; } } diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index 250e443..5233fbc 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -53,7 +53,7 @@ static inline int ncp_read_bounce_size(__u32 size) { return sizeof(struct ncp_reply_header) + 2 + 2 + size + 8; }; int ncp_read_bounce(struct ncp_server *, const char *, __u32, __u16, - char __user *, int *, void* bounce, __u32 bouncelen); + struct iov_iter *, int *, void *bounce, __u32 bouncelen); int ncp_read_kernel(struct ncp_server *, const char *, __u32, __u16, char *, int *); int ncp_write_kernel(struct ncp_server *, const char *, __u32, __u16, diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c index 1a63bfd..421b6f9 100644 --- a/fs/ncpfs/symlink.c +++ b/fs/ncpfs/symlink.c @@ -156,7 +156,7 @@ int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { goto failfree; } - inode=dentry->d_inode; + inode=d_inode(dentry); if (ncp_make_open(inode, O_WRONLY)) goto failfree; diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index c7abc10..f31fd0d 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -1,6 +1,6 @@ config NFS_FS tristate "NFS client support" - depends on INET && FILE_LOCKING + depends on INET && FILE_LOCKING && MULTIUSER select LOCKD select SUNRPC select NFS_ACL_SUPPORT if NFS_V3_ACL diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile index 1e987ac..8664417 100644 --- a/fs/nfs/Makefile +++ b/fs/nfs/Makefile @@ -22,7 +22,7 @@ nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o obj-$(CONFIG_NFS_V4) += nfsv4.o CFLAGS_nfs4trace.o += -I$(src) nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \ - delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \ + delegation.o nfs4idmap.o callback.o callback_xdr.o callback_proc.o \ nfs4namespace.o nfs4getroot.o nfs4client.o nfs4session.o \ dns_resolve.o nfs4trace.o nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 1cac3c1..d2554fe 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -890,6 +890,7 @@ static struct pnfs_layoutdriver_type blocklayout_type = { .free_deviceid_node = bl_free_deviceid_node, .pg_read_ops = &bl_pg_read_ops, .pg_write_ops = &bl_pg_write_ops, + .sync = pnfs_generic_sync, }; static int __init nfs4blocklayout_init(void) diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c index 5aed4f9..e535599 100644 --- a/fs/nfs/blocklayout/dev.c +++ b/fs/nfs/blocklayout/dev.c @@ -33,7 +33,7 @@ bl_free_deviceid_node(struct nfs4_deviceid_node *d) container_of(d, struct pnfs_block_dev, node); bl_free_device(dev); - kfree(dev); + kfree_rcu(dev, node.rcu); } static int diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 351be920..8d129bb 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp) if 
(try_to_freeze()) continue; - prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE); + prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); spin_lock_bh(&serv->sv_cb_lock); if (!list_empty(&serv->sv_cb_list)) { req = list_first_entry(&serv->sv_cb_list, @@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp) error); } else { spin_unlock_bh(&serv->sv_cb_lock); - /* schedule_timeout to game the hung task watchdog */ - schedule_timeout(60 * HZ); + schedule(); finish_wait(&serv->sv_cb_waitq, &wq); } + flush_signals(current); } return 0; } diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 1987415..892aeff 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -31,7 +31,6 @@ #include <linux/lockd/bind.h> #include <linux/seq_file.h> #include <linux/mount.h> -#include <linux/nfs_idmap.h> #include <linux/vfs.h> #include <linux/inet.h> #include <linux/in6.h> diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index a6ad688..029d688 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -378,7 +378,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct if (freeme == NULL) goto out; } - list_add_rcu(&delegation->super_list, &server->delegations); + list_add_tail_rcu(&delegation->super_list, &server->delegations); rcu_assign_pointer(nfsi->delegation, delegation); delegation = NULL; @@ -514,7 +514,7 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode) delegation = nfs_inode_detach_delegation(inode); if (delegation != NULL) - nfs_do_return_delegation(inode, delegation, 0); + nfs_do_return_delegation(inode, delegation, 1); } /** diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index c19e16f..b2c8b31 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -416,15 +416,14 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) { struct nfs_inode *nfsi; - if (dentry->d_inode == NULL) - goto different; + if (d_really_is_negative(dentry)) + return 0; - nfsi = NFS_I(dentry->d_inode); + nfsi = NFS_I(d_inode(dentry)); if (entry->fattr->fileid == nfsi->fileid) return 1; if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0) return 1; -different: return 0; } @@ -473,7 +472,7 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) struct qstr filename = QSTR_INIT(entry->name, entry->len); struct dentry *dentry; struct dentry *alias; - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); struct inode *inode; int status; @@ -497,9 +496,9 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) goto out; if (nfs_same_file(dentry, entry)) { nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); - status = nfs_refresh_inode(dentry->d_inode, entry->fattr); + status = nfs_refresh_inode(d_inode(dentry), entry->fattr); if (!status) - nfs_setsecurity(dentry->d_inode, entry->fattr, entry->label); + nfs_setsecurity(d_inode(dentry), entry->fattr, entry->label); goto out; } else { d_invalidate(dentry); @@ -544,6 +543,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en if (scratch == NULL) return -ENOMEM; + if (buflen == 0) + goto out_nopages; + xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen); xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); @@ -565,6 +567,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en break; } while (!entry->eof); +out_nopages: if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) { array = nfs_readdir_get_array(page); if (!IS_ERR(array)) { @@ -870,7 +873,7 @@ static bool 
nfs_dir_mapping_need_revalidate(struct inode *dir) static int nfs_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); nfs_readdir_descriptor_t my_desc, *desc = &my_desc; struct nfs_open_dir_context *dir_ctx = file->private_data; @@ -1118,15 +1121,15 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) { parent = ACCESS_ONCE(dentry->d_parent); - dir = ACCESS_ONCE(parent->d_inode); + dir = d_inode_rcu(parent); if (!dir) return -ECHILD; } else { parent = dget_parent(dentry); - dir = parent->d_inode; + dir = d_inode(parent); } nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE); - inode = dentry->d_inode; + inode = d_inode(dentry); if (!inode) { if (nfs_neg_need_reval(dir, dentry, flags)) { @@ -1242,7 +1245,7 @@ out_error: } /* - * A weaker form of d_revalidate for revalidating just the dentry->d_inode + * A weaker form of d_revalidate for revalidating just the d_inode(dentry) * when we don't really care about the dentry name. This is called when a * pathwalk ends on a dentry that was not found via a normal lookup in the * parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals). @@ -1253,7 +1256,7 @@ out_error: static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags) { int error; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); /* * I believe we can only get a negative dentry here in the case of a @@ -1287,7 +1290,7 @@ static int nfs_dentry_delete(const struct dentry *dentry) dentry, dentry->d_flags); /* Unhash any dentry with a stale inode */ - if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode)) + if (d_really_is_positive(dentry) && NFS_STALE(d_inode(dentry))) return 1; if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { @@ -1491,7 +1494,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, int err; /* Expect a negative dentry */ - BUG_ON(dentry->d_inode); + BUG_ON(d_inode(dentry)); dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n", dir->i_sb->s_id, dir->i_ino, dentry); @@ -1587,7 +1590,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1) goto no_open; - inode = dentry->d_inode; + inode = d_inode(dentry); /* We can't create new files in nfs_open_revalidate(), so we * optimize away revalidation of negative dentries. 
@@ -1598,12 +1601,12 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) { parent = ACCESS_ONCE(dentry->d_parent); - dir = ACCESS_ONCE(parent->d_inode); + dir = d_inode_rcu(parent); if (!dir) return -ECHILD; } else { parent = dget_parent(dentry); - dir = parent->d_inode; + dir = d_inode(parent); } if (!nfs_neg_need_reval(dir, dentry, flags)) ret = 1; @@ -1643,14 +1646,14 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle, struct nfs4_label *label) { struct dentry *parent = dget_parent(dentry); - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); struct inode *inode; int error = -EACCES; d_drop(dentry); /* We may have been initialized further down */ - if (dentry->d_inode) + if (d_really_is_positive(dentry)) goto out; if (fhandle->size == 0) { error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, NULL); @@ -1768,7 +1771,7 @@ EXPORT_SYMBOL_GPL(nfs_mkdir); static void nfs_dentry_handle_enoent(struct dentry *dentry) { - if (dentry->d_inode != NULL && !d_unhashed(dentry)) + if (d_really_is_positive(dentry) && !d_unhashed(dentry)) d_delete(dentry); } @@ -1780,13 +1783,13 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) dir->i_sb->s_id, dir->i_ino, dentry); trace_nfs_rmdir_enter(dir, dentry); - if (dentry->d_inode) { + if (d_really_is_positive(dentry)) { nfs_wait_on_sillyrename(dentry); error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { case 0: - clear_nlink(dentry->d_inode); + clear_nlink(d_inode(dentry)); break; case -ENOENT: nfs_dentry_handle_enoent(dentry); @@ -1808,8 +1811,8 @@ EXPORT_SYMBOL_GPL(nfs_rmdir); */ static int nfs_safe_remove(struct dentry *dentry) { - struct inode *dir = dentry->d_parent->d_inode; - struct inode *inode = dentry->d_inode; + struct inode *dir = d_inode(dentry->d_parent); + struct inode *inode = d_inode(dentry); int error = -EBUSY; dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry); @@ -1853,7 +1856,7 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry) if (d_count(dentry) > 1) { spin_unlock(&dentry->d_lock); /* Start asynchronous writeout of the inode */ - write_inode_now(dentry->d_inode, 0); + write_inode_now(d_inode(dentry), 0); error = nfs_sillyrename(dir, dentry); goto out; } @@ -1931,7 +1934,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) * No big deal if we can't add this page to the page cache here. * READLINK will get the missing page from the server if needed. 
*/ - if (!add_to_page_cache_lru(page, dentry->d_inode->i_mapping, 0, + if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0, GFP_KERNEL)) { SetPageUptodate(page); unlock_page(page); @@ -1950,7 +1953,7 @@ EXPORT_SYMBOL_GPL(nfs_symlink); int nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); int error; dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n", @@ -1997,8 +2000,8 @@ EXPORT_SYMBOL_GPL(nfs_link); int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct dentry *dentry = NULL, *rehash = NULL; struct rpc_task *task; int error = -EBUSY; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index e907c8c..38678d9 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr) int i; ssize_t count; - WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count); - - count = dreq->mirrors[hdr->pgio_mirror_idx].count; - if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) { - count = hdr->io_start + hdr->good_bytes - dreq->io_start; - dreq->mirrors[hdr->pgio_mirror_idx].count = count; - } - - /* update the dreq->count by finding the minimum agreed count from all - * mirrors */ - count = dreq->mirrors[0].count; + if (dreq->mirror_count == 1) { + dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes; + dreq->count += hdr->good_bytes; + } else { + /* mirrored writes */ + count = dreq->mirrors[hdr->pgio_mirror_idx].count; + if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) { + count = hdr->io_start + hdr->good_bytes - dreq->io_start; + dreq->mirrors[hdr->pgio_mirror_idx].count = count; + } + /* update the dreq->count by finding the minimum agreed count from all + * mirrors */ + count = dreq->mirrors[0].count; - for (i = 1; i < dreq->mirror_count; i++) - count = min(count, dreq->mirrors[i].count); + for (i = 1; i < dreq->mirror_count; i++) + count = min(count, dreq->mirrors[i].count); - dreq->count = count; + dreq->count = count; + } } /* @@ -240,7 +243,6 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, /** * nfs_direct_IO - NFS address space operation for direct I/O - * @rw: direction (read or write) * @iocb: target I/O control block * @iov: array of vectors that define I/O buffer * @pos: offset in file to begin the operation @@ -251,7 +253,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, * shunt off direct read and write requests before the VFS gets them, * so this method is only ever called for swap. 
*/ -ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) +ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) { struct inode *inode = iocb->ki_filp->f_mapping->host; @@ -259,18 +261,11 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t if (!IS_SWAPFILE(inode)) return 0; -#ifndef CONFIG_NFS_SWAP - dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n", - iocb->ki_filp, (long long) pos, iter->nr_segs); + VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); - return -EINVAL; -#else - VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE); - - if (rw == READ) + if (iov_iter_rw(iter) == READ) return nfs_file_direct_read(iocb, iter, pos); - return nfs_file_direct_write(iocb, iter, pos); -#endif /* CONFIG_NFS_SWAP */ + return nfs_file_direct_write(iocb, iter); } static void nfs_direct_release_pages(struct page **pages, unsigned int npages) @@ -387,13 +382,13 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write) if (write) nfs_zap_mapping(inode, inode->i_mapping); - inode_dio_done(inode); + inode_dio_end(inode); if (dreq->iocb) { long res = (long) dreq->error; if (!res) res = (long) dreq->count; - aio_complete(dreq->iocb, res, 0); + dreq->iocb->ki_complete(dreq->iocb, res, 0); } complete_all(&dreq->completion); @@ -404,8 +399,8 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write) static void nfs_direct_readpage_release(struct nfs_page *req) { dprintk("NFS: direct read done (%s/%llu %d@%lld)\n", - req->wb_context->dentry->d_inode->i_sb->s_id, - (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode), + d_inode(req->wb_context->dentry)->i_sb->s_id, + (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)), req->wb_bytes, (long long)req_offset(req)); nfs_release_request(req); @@ -487,7 +482,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, &nfs_direct_read_completion_ops); get_dreq(dreq); desc.pg_dreq = dreq; - atomic_inc(&inode->i_dio_count); + inode_dio_begin(inode); while (iov_iter_count(iter)) { struct page **pagevec; @@ -539,7 +534,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. */ if (requested_bytes == 0) { - inode_dio_done(inode); + inode_dio_end(inode); nfs_direct_req_release(dreq); return result < 0 ? result : -EIO; } @@ -873,7 +868,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, &nfs_direct_write_completion_ops); desc.pg_dreq = dreq; get_dreq(dreq); - atomic_inc(&inode->i_dio_count); + inode_dio_begin(inode); NFS_I(inode)->write_io += iov_iter_count(iter); while (iov_iter_count(iter)) { @@ -929,7 +924,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. */ if (requested_bytes == 0) { - inode_dio_done(inode); + inode_dio_end(inode); nfs_direct_req_release(dreq); return result < 0 ? result : -EIO; } @@ -960,8 +955,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * Note that O_APPEND is not supported for NFS direct writes, as there * is no atomic O_APPEND write facility in the NFS protocol. 
*/ -ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, - loff_t pos) +ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) { ssize_t result = -EINVAL; struct file *file = iocb->ki_filp; @@ -969,25 +963,16 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, struct inode *inode = mapping->host; struct nfs_direct_req *dreq; struct nfs_lock_context *l_ctx; - loff_t end; - size_t count = iov_iter_count(iter); - end = (pos + count - 1) >> PAGE_CACHE_SHIFT; - - nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count); + loff_t pos, end; dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", - file, count, (long long) pos); + file, iov_iter_count(iter), (long long) iocb->ki_pos); - result = generic_write_checks(file, &pos, &count, 0); - if (result) - goto out; + nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, + iov_iter_count(iter)); - result = -EINVAL; - if ((ssize_t) count < 0) - goto out; - result = 0; - if (!count) - goto out; + pos = iocb->ki_pos; + end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT; mutex_lock(&inode->i_mutex); @@ -1002,7 +987,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, goto out_unlock; } - task_io_account_write(count); + task_io_account_write(iov_iter_count(iter)); result = -ENOMEM; dreq = nfs_direct_req_alloc(); @@ -1010,7 +995,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, goto out_unlock; dreq->inode = inode; - dreq->bytes_left = count; + dreq->bytes_left = iov_iter_count(iter); dreq->io_start = pos; dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); l_ctx = nfs_get_lock_context(dreq->ctx); @@ -1041,6 +1026,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, if (i_size_read(inode) < iocb->ki_pos) i_size_write(inode, iocb->ki_pos); spin_unlock(&inode->i_lock); + generic_write_sync(file, pos, result); } } nfs_direct_req_release(dreq); @@ -1050,7 +1036,6 @@ out_release: nfs_direct_req_release(dreq); out_unlock: mutex_unlock(&inode->i_mutex); -out: return result; } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index e679d24..8b8d83a 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -26,7 +26,6 @@ #include <linux/nfs_mount.h> #include <linux/mm.h> #include <linux/pagemap.h> -#include <linux/aio.h> #include <linux/gfp.h> #include <linux/swap.h> @@ -171,7 +170,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) struct inode *inode = file_inode(iocb->ki_filp); ssize_t result; - if (iocb->ki_filp->f_flags & O_DIRECT) + if (iocb->ki_flags & IOCB_DIRECT) return nfs_file_direct_read(iocb, to, iocb->ki_pos); dprintk("NFS: read(%pD2, %zu@%lu)\n", @@ -281,6 +280,7 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) trace_nfs_fsync_enter(inode); + nfs_inode_dio_wait(inode); do { ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret != 0) @@ -675,17 +675,20 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) unsigned long written = 0; ssize_t result; size_t count = iov_iter_count(from); - loff_t pos = iocb->ki_pos; result = nfs_key_timeout_notify(file, inode); if (result) return result; - if (file->f_flags & O_DIRECT) - return nfs_file_direct_write(iocb, from, pos); + if (iocb->ki_flags & IOCB_DIRECT) { + result = generic_write_checks(iocb, from); + if (result <= 0) + return result; + return nfs_file_direct_write(iocb, from); + } dprintk("NFS: write(%pD2, %zu@%Ld)\n", - file, count, (long long) pos); + file, count, (long long) 
iocb->ki_pos); result = -EBUSY; if (IS_SWAPFILE(inode)) @@ -693,7 +696,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) /* * O_APPEND implies that we must revalidate the file length. */ - if (file->f_flags & O_APPEND) { + if (iocb->ki_flags & IOCB_APPEND) { result = nfs_revalidate_file_size(inode, file); if (result) goto out; @@ -780,7 +783,7 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) * Flush all pending writes before doing anything * with locks.. */ - nfs_sync_mapping(filp->f_mapping); + vfs_fsync(filp, 0); l_ctx = nfs_get_lock_context(nfs_file_open_context(filp)); if (!IS_ERR(l_ctx)) { @@ -927,8 +930,6 @@ EXPORT_SYMBOL_GPL(nfs_flock); const struct file_operations nfs_file_operations = { .llseek = nfs_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = nfs_file_read, .write_iter = nfs_file_write, .mmap = nfs_file_mmap, diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 91e88a7..a46bf6d 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -258,7 +258,8 @@ filelayout_set_layoutcommit(struct nfs_pgio_header *hdr) hdr->res.verf->committed != NFS_DATA_SYNC) return; - pnfs_set_layoutcommit(hdr); + pnfs_set_layoutcommit(hdr->inode, hdr->lseg, + hdr->mds_offset + hdr->res.count); dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino, (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb); } @@ -373,7 +374,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task, } if (data->verf.committed == NFS_UNSTABLE) - pnfs_commit_set_layoutcommit(data); + pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb); return 0; } @@ -1086,7 +1087,7 @@ filelayout_alloc_deviceid_node(struct nfs_server *server, } static void -filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d) +filelayout_free_deviceid_node(struct nfs4_deviceid_node *d) { nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node)); } @@ -1137,7 +1138,8 @@ static struct pnfs_layoutdriver_type filelayout_type = { .read_pagelist = filelayout_read_pagelist, .write_pagelist = filelayout_write_pagelist, .alloc_deviceid_node = filelayout_alloc_deviceid_node, - .free_deviceid_node = filelayout_free_deveiceid_node, + .free_deviceid_node = filelayout_free_deviceid_node, + .sync = pnfs_nfs_generic_sync, }; static int __init nfs4filelayout_init(void) diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index 4f372e2..4946ef4 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -55,7 +55,7 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) nfs4_pnfs_ds_put(ds); } kfree(dsaddr->stripe_indices); - kfree(dsaddr); + kfree_rcu(dsaddr, id_node.rcu); } /* Decode opaque device data and return the result */ diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 315cc68..7d05089 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -11,10 +11,10 @@ #include <linux/module.h> #include <linux/sunrpc/metrics.h> -#include <linux/nfs_idmap.h> #include "flexfilelayout.h" #include "../nfs4session.h" +#include "../nfs4idmap.h" #include "../internal.h" #include "../delegation.h" #include "../nfs4trace.h" @@ -891,7 +891,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task, static void ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr) { - pnfs_set_layoutcommit(hdr); + pnfs_set_layoutcommit(hdr->inode, hdr->lseg, 
+ hdr->mds_offset + hdr->res.count); dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino, (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb); } @@ -1074,7 +1075,7 @@ static int ff_layout_commit_done_cb(struct rpc_task *task, } if (data->verf.committed == NFS_UNSTABLE) - pnfs_commit_set_layoutcommit(data); + pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb); return 0; } @@ -1414,7 +1415,7 @@ ff_layout_get_ds_info(struct inode *inode) } static void -ff_layout_free_deveiceid_node(struct nfs4_deviceid_node *d) +ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d) { nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds, id_node)); @@ -1498,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .pg_read_ops = &ff_layout_pg_read_ops, .pg_write_ops = &ff_layout_pg_write_ops, .get_ds_info = ff_layout_get_ds_info, - .free_deviceid_node = ff_layout_free_deveiceid_node, + .free_deviceid_node = ff_layout_free_deviceid_node, .mark_request_commit = pnfs_layout_mark_request_commit, .clear_request_commit = pnfs_generic_clear_request_commit, .scan_commit_lists = pnfs_generic_scan_commit_lists, @@ -1508,6 +1509,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .write_pagelist = ff_layout_write_pagelist, .alloc_deviceid_node = ff_layout_alloc_deviceid_node, .encode_layoutreturn = ff_layout_encode_layoutreturn, + .sync = pnfs_nfs_generic_sync, }; static int __init nfs4flexfilelayout_init(void) diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index e2c01f2..77a2d02 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -30,7 +30,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds) { nfs4_print_deviceid(&mirror_ds->id_node.deviceid); nfs4_pnfs_ds_put(mirror_ds->ds); - kfree(mirror_ds); + kfree_rcu(mirror_ds, id_node.rcu); } /* Decode opaque device data and construct new_ds using it */ diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index 9ac3846..a608ffd 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c @@ -56,11 +56,11 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i * This again causes shrink_dcache_for_umount_subtree() to * Oops, since the test for IS_ROOT() will fail. 
*/ - spin_lock(&sb->s_root->d_inode->i_lock); + spin_lock(&d_inode(sb->s_root)->i_lock); spin_lock(&sb->s_root->d_lock); hlist_del_init(&sb->s_root->d_u.d_alias); spin_unlock(&sb->s_root->d_lock); - spin_unlock(&sb->s_root->d_inode->i_lock); + spin_unlock(&d_inode(sb->s_root)->i_lock); } return 0; } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d42dff6..f734562 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -133,6 +133,13 @@ void nfs_evict_inode(struct inode *inode) nfs_clear_inode(inode); } +int nfs_sync_inode(struct inode *inode) +{ + nfs_inode_dio_wait(inode); + return nfs_wb_all(inode); +} +EXPORT_SYMBOL_GPL(nfs_sync_inode); + /** * nfs_sync_mapping - helper to flush all mmapped dirty data to disk */ @@ -192,7 +199,6 @@ void nfs_zap_caches(struct inode *inode) nfs_zap_caches_locked(inode); spin_unlock(&inode->i_lock); } -EXPORT_SYMBOL_GPL(nfs_zap_caches); void nfs_zap_mapping(struct inode *inode, struct address_space *mapping) { @@ -495,7 +501,7 @@ EXPORT_SYMBOL_GPL(nfs_fhget); int nfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct nfs_fattr *fattr; int error = -ENOMEM; @@ -525,10 +531,8 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr) trace_nfs_setattr_enter(inode); /* Write all dirty data */ - if (S_ISREG(inode->i_mode)) { - nfs_inode_dio_wait(inode); - nfs_wb_all(inode); - } + if (S_ISREG(inode->i_mode)) + nfs_sync_inode(inode); fattr = nfs_alloc_fattr(); if (fattr == NULL) @@ -621,7 +625,7 @@ static void nfs_request_parent_use_readdirplus(struct dentry *dentry) struct dentry *parent; parent = dget_parent(dentry); - nfs_force_use_readdirplus(parent->d_inode); + nfs_force_use_readdirplus(d_inode(parent)); dput(parent); } @@ -637,15 +641,16 @@ static bool nfs_need_revalidate_inode(struct inode *inode) int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; int err = 0; trace_nfs_getattr_enter(inode); /* Flush out writes to the server in order to update c/mtime. 
*/ if (S_ISREG(inode->i_mode)) { - nfs_inode_dio_wait(inode); - err = filemap_write_and_wait(inode->i_mapping); + mutex_lock(&inode->i_mutex); + err = nfs_sync_inode(inode); + mutex_unlock(&inode->i_mutex); if (err) goto out; } @@ -708,7 +713,7 @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx) { struct nfs_lock_context *res, *new = NULL; - struct inode *inode = ctx->dentry->d_inode; + struct inode *inode = d_inode(ctx->dentry); spin_lock(&inode->i_lock); res = __nfs_find_lock_context(ctx); @@ -736,7 +741,7 @@ EXPORT_SYMBOL_GPL(nfs_get_lock_context); void nfs_put_lock_context(struct nfs_lock_context *l_ctx) { struct nfs_open_context *ctx = l_ctx->open_context; - struct inode *inode = ctx->dentry->d_inode; + struct inode *inode = d_inode(ctx->dentry); if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock)) return; @@ -763,7 +768,7 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync) return; if (!is_sync) return; - inode = ctx->dentry->d_inode; + inode = d_inode(ctx->dentry); if (!list_empty(&NFS_I(inode)->open_files)) return; server = NFS_SERVER(inode); @@ -810,7 +815,7 @@ EXPORT_SYMBOL_GPL(get_nfs_open_context); static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync) { - struct inode *inode = ctx->dentry->d_inode; + struct inode *inode = d_inode(ctx->dentry); struct super_block *sb = ctx->dentry->d_sb; if (!list_empty(&ctx->list)) { @@ -842,7 +847,7 @@ EXPORT_SYMBOL_GPL(put_nfs_open_context); */ void nfs_inode_attach_open_context(struct nfs_open_context *ctx) { - struct inode *inode = ctx->dentry->d_inode; + struct inode *inode = d_inode(ctx->dentry); struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); @@ -885,7 +890,7 @@ static void nfs_file_clear_open_context(struct file *filp) struct nfs_open_context *ctx = nfs_file_open_context(filp); if (ctx) { - struct inode *inode = ctx->dentry->d_inode; + struct inode *inode = d_inode(ctx->dentry); filp->private_data = NULL; spin_lock(&inode->i_lock); @@ -1588,6 +1593,19 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa } EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc); + +static inline bool nfs_fileid_valid(struct nfs_inode *nfsi, + struct nfs_fattr *fattr) +{ + bool ret1 = true, ret2 = true; + + if (fattr->valid & NFS_ATTR_FATTR_FILEID) + ret1 = (nfsi->fileid == fattr->fileid); + if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) + ret2 = (nfsi->fileid == fattr->mounted_on_fileid); + return ret1 || ret2; +} + /* * Many nfs protocol calls return the new file attributes after * an operation. 
Here we update the inode to reflect the state @@ -1614,7 +1632,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfs_display_fhandle_hash(NFS_FH(inode)), atomic_read(&inode->i_count), fattr->valid); - if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) { + if (!nfs_fileid_valid(nfsi, fattr)) { printk(KERN_ERR "NFS: server %s error: fileid changed\n" "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n", NFS_SERVER(inode)->nfs_client->cl_hostname, @@ -1819,7 +1837,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) struct inode *nfs_alloc_inode(struct super_block *sb) { struct nfs_inode *nfsi; - nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL); + nfsi = kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL); if (!nfsi) return NULL; nfsi->flags = 0UL; diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index b5a0afc..c8162c6 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -139,7 +139,7 @@ EXPORT_SYMBOL_GPL(nfs_path); struct vfsmount *nfs_d_automount(struct path *path) { struct vfsmount *mnt; - struct nfs_server *server = NFS_SERVER(path->dentry->d_inode); + struct nfs_server *server = NFS_SERVER(d_inode(path->dentry)); struct nfs_fh *fh = NULL; struct nfs_fattr *fattr = NULL; @@ -180,16 +180,16 @@ out_nofree: static int nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - if (NFS_FH(dentry->d_inode)->size != 0) + if (NFS_FH(d_inode(dentry))->size != 0) return nfs_getattr(mnt, dentry, stat); - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(d_inode(dentry), stat); return 0; } static int nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr) { - if (NFS_FH(dentry->d_inode)->size != 0) + if (NFS_FH(d_inode(dentry))->size != 0) return nfs_setattr(dentry, attr); return -EACCES; } @@ -279,7 +279,7 @@ struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry, struct dentry *parent = dget_parent(dentry); /* Look it up again to get its attributes */ - err = server->nfs_client->rpc_ops->lookup(parent->d_inode, &dentry->d_name, fh, fattr, NULL); + err = server->nfs_client->rpc_ops->lookup(d_inode(parent), &dentry->d_name, fh, fattr, NULL); dput(parent); if (err != 0) return ERR_PTR(err); diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 658e586..1ebe2fc 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -279,7 +279,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data, ssize_t nfs3_listxattr(struct dentry *dentry, char *data, size_t size) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); ssize_t result = 0; int error; diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 1f11d25..cb28ccee 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -120,7 +120,7 @@ static int nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, struct iattr *sattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct nfs3_sattrargs arg = { .fh = NFS_FH(inode), .sattr = sattr, @@ -386,13 +386,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, * not sure this buys us anything (and I'd have * to revamp the NFSv3 XDR code) */ status = nfs3_proc_setattr(dentry, data->res.fattr, sattr); - nfs_post_op_update_inode(dentry->d_inode, data->res.fattr); + nfs_post_op_update_inode(d_inode(dentry), data->res.fattr); dprintk("NFS reply setattr (post-create): %d\n", status); if (status != 0) goto 
out_release_acls; } - status = nfs3_proc_setacls(dentry->d_inode, acl, default_acl); + status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl); out_release_acls: posix_acl_release(acl); @@ -570,7 +570,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr) if (status != 0) goto out_release_acls; - status = nfs3_proc_setacls(dentry->d_inode, acl, default_acl); + status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl); out_release_acls: posix_acl_release(acl); @@ -623,7 +623,7 @@ static int nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { - struct inode *dir = dentry->d_inode; + struct inode *dir = d_inode(dentry); __be32 *verf = NFS_I(dir)->cookieverf; struct nfs3_readdirargs arg = { .fh = NFS_FH(dir), @@ -715,7 +715,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, if (status != 0) goto out_release_acls; - status = nfs3_proc_setacls(dentry->d_inode, acl, default_acl); + status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl); out_release_acls: posix_acl_release(acl); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index cb17072..3a9e752 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -36,13 +36,16 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, loff_t offset, loff_t len) { struct inode *inode = file_inode(filep); + struct nfs_server *server = NFS_SERVER(inode); struct nfs42_falloc_args args = { .falloc_fh = NFS_FH(inode), .falloc_offset = offset, .falloc_length = len, + .falloc_bitmask = server->cache_consistency_bitmask, + }; + struct nfs42_falloc_res res = { + .falloc_server = server, }; - struct nfs42_falloc_res res; - struct nfs_server *server = NFS_SERVER(inode); int status; msg->rpc_argp = &args; @@ -52,8 +55,17 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, if (status) return status; - return nfs4_call_sync(server->client, server, msg, - &args.seq_args, &res.seq_res, 0); + res.falloc_fattr = nfs_alloc_fattr(); + if (!res.falloc_fattr) + return -ENOMEM; + + status = nfs4_call_sync(server->client, server, msg, + &args.seq_args, &res.seq_res, 0); + if (status == 0) + status = nfs_post_op_update_inode(inode, res.falloc_fattr); + + kfree(res.falloc_fattr); + return status; } static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, @@ -84,9 +96,13 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len) if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE)) return -EOPNOTSUPP; + mutex_lock(&inode->i_mutex); + err = nfs42_proc_fallocate(&msg, filep, offset, len); if (err == -EOPNOTSUPP) NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE; + + mutex_unlock(&inode->i_mutex); return err; } @@ -101,9 +117,16 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len) if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE)) return -EOPNOTSUPP; + nfs_wb_all(inode); + mutex_lock(&inode->i_mutex); + err = nfs42_proc_fallocate(&msg, filep, offset, len); + if (err == 0) + truncate_pagecache_range(inode, offset, (offset + len) -1); if (err == -EOPNOTSUPP) NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE; + + mutex_unlock(&inode->i_mutex); return err; } diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 038a7e15..1a25b27 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -25,16 +25,20 @@ #define NFS4_enc_allocate_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ - encode_allocate_maxsz) + 
encode_allocate_maxsz + \ + encode_getattr_maxsz) #define NFS4_dec_allocate_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ - decode_allocate_maxsz) + decode_allocate_maxsz + \ + decode_getattr_maxsz) #define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ - encode_deallocate_maxsz) + encode_deallocate_maxsz + \ + encode_getattr_maxsz) #define NFS4_dec_deallocate_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ - decode_deallocate_maxsz) + decode_deallocate_maxsz + \ + decode_getattr_maxsz) #define NFS4_enc_seek_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_seek_maxsz) @@ -92,6 +96,7 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req, encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->falloc_fh, &hdr); encode_allocate(xdr, args, &hdr); + encode_getfattr(xdr, args->falloc_bitmask, &hdr); encode_nops(&hdr); } @@ -110,6 +115,7 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req, encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->falloc_fh, &hdr); encode_deallocate(xdr, args, &hdr); + encode_getfattr(xdr, args->falloc_bitmask, &hdr); encode_nops(&hdr); } @@ -183,6 +189,9 @@ static int nfs4_xdr_dec_allocate(struct rpc_rqst *rqstp, if (status) goto out; status = decode_allocate(xdr, res); + if (status) + goto out; + decode_getfattr(xdr, res->falloc_fattr, res->falloc_server); out: return status; } @@ -207,6 +216,9 @@ static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp, if (status) goto out; status = decode_deallocate(xdr, res); + if (status) + goto out; + decode_getfattr(xdr, res->falloc_fattr, res->falloc_server); out: return status; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 86d6214..e42be52 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -4,7 +4,6 @@ */ #include <linux/module.h> #include <linux/nfs_fs.h> -#include <linux/nfs_idmap.h> #include <linux/nfs_mount.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/auth.h> @@ -15,6 +14,7 @@ #include "callback.h" #include "delegation.h" #include "nfs4session.h" +#include "nfs4idmap.h" #include "pnfs.h" #include "netns.h" @@ -1130,7 +1130,7 @@ error: */ static int nfs_probe_destination(struct nfs_server *server) { - struct inode *inode = server->super->s_root->d_inode; + struct inode *inode = d_inode(server->super->s_root); struct nfs_fattr *fattr; int error; diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 8b46389..f58c17b 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -10,6 +10,8 @@ #include "fscache.h" #include "pnfs.h" +#include "nfstrace.h" + #ifdef CONFIG_NFS_V4_2 #include "nfs42.h" #endif @@ -46,7 +48,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) openflags &= ~(O_CREAT|O_EXCL); parent = dget_parent(dentry); - dir = parent->d_inode; + dir = d_inode(parent); ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode); err = PTR_ERR(ctx); @@ -57,7 +59,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) if (openflags & O_TRUNC) { attr.ia_valid |= ATTR_SIZE; attr.ia_size = 0; - nfs_wb_all(inode); + nfs_sync_inode(inode); } inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened); @@ -74,7 +76,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) goto out_drop; } } - if (inode != dentry->d_inode) + if (inode != d_inode(dentry)) goto out_drop; nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); @@ -100,6 +102,9 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) int ret; struct inode *inode = 
file_inode(file); + trace_nfs_fsync_enter(inode); + + nfs_inode_dio_wait(inode); do { ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret != 0) @@ -107,7 +112,7 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) mutex_lock(&inode->i_mutex); ret = nfs_file_fsync_commit(file, start, end, datasync); if (!ret) - ret = pnfs_layoutcommit_inode(inode, true); + ret = pnfs_sync_inode(inode, !!datasync); mutex_unlock(&inode->i_mutex); /* * If nfs_file_fsync_commit detected a server reboot, then @@ -118,6 +123,7 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) end = LLONG_MAX; } while (ret == -EAGAIN); + trace_nfs_fsync_exit(inode, ret); return ret; } @@ -152,15 +158,9 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t if (ret < 0) return ret; - mutex_lock(&inode->i_mutex); if (mode & FALLOC_FL_PUNCH_HOLE) - ret = nfs42_proc_deallocate(filep, offset, len); - else - ret = nfs42_proc_allocate(filep, offset, len); - mutex_unlock(&inode->i_mutex); - - nfs_zap_caches(inode); - return ret; + return nfs42_proc_deallocate(filep, offset, len); + return nfs42_proc_allocate(filep, offset, len); } #endif /* CONFIG_NFS_V4_2 */ @@ -170,8 +170,6 @@ const struct file_operations nfs4_file_operations = { #else .llseek = nfs_file_llseek, #endif - .read = new_sync_read, - .write = new_sync_write, .read_iter = nfs_file_read, .write_iter = nfs_file_write, .mmap = nfs_file_mmap, diff --git a/fs/nfs/idmap.c b/fs/nfs/nfs4idmap.c index 857e2a9..2e1737c 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -36,7 +36,6 @@ #include <linux/types.h> #include <linux/parser.h> #include <linux/fs.h> -#include <linux/nfs_idmap.h> #include <net/net_namespace.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/nfs_fs.h> @@ -49,6 +48,7 @@ #include "internal.h" #include "netns.h" +#include "nfs4idmap.h" #include "nfs4trace.h" #define NFS_UINT_MAXLEN 11 diff --git a/fs/nfs/nfs4idmap.h b/fs/nfs/nfs4idmap.h new file mode 100644 index 0000000..de44d73 --- /dev/null +++ b/fs/nfs/nfs4idmap.h @@ -0,0 +1,68 @@ +/* + * fs/nfs/nfs4idmap.h + * + * UID and GID to name mapping for clients. + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Marius Aamodt Eriksen <marius@umich.edu> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef NFS_IDMAP_H +#define NFS_IDMAP_H + +#include <linux/uidgid.h> +#include <uapi/linux/nfs_idmap.h> + + +/* Forward declaration to make this header independent of others */ +struct nfs_client; +struct nfs_server; +struct nfs_fattr; +struct nfs4_string; + +int nfs_idmap_init(void); +void nfs_idmap_quit(void); +int nfs_idmap_new(struct nfs_client *); +void nfs_idmap_delete(struct nfs_client *); + +void nfs_fattr_init_names(struct nfs_fattr *fattr, + struct nfs4_string *owner_name, + struct nfs4_string *group_name); +void nfs_fattr_free_names(struct nfs_fattr *); +void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *); + +int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, kuid_t *); +int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, kgid_t *); +int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t); +int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t); + +int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res); + +extern unsigned int nfs_idmap_cache_timeout; +#endif /* NFS_IDMAP_H */ diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 3d83cb1..f592672 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -375,7 +375,7 @@ static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry * dprintk("%s: getting locations for %pd2\n", __func__, dentry); - err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page); + err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page); dput(parent); if (err != 0 || fs_locations->nlocations <= 0 || @@ -396,7 +396,7 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry, { rpc_authflavor_t flavor = server->client->cl_auth->au_flavor; struct dentry *parent = dget_parent(dentry); - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); struct qstr *name = &dentry->d_name; struct rpc_clnt *client; struct vfsmount *mnt; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 627f37c..45b35b9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -51,7 +51,6 @@ #include <linux/namei.h> #include <linux/mount.h> #include <linux/module.h> -#include <linux/nfs_idmap.h> #include <linux/xattr.h> #include <linux/utsname.h> #include <linux/freezer.h> @@ -63,6 +62,7 @@ #include "callback.h" #include "pnfs.h" #include "netns.h" +#include "nfs4idmap.h" #include "nfs4session.h" #include "fscache.h" @@ -185,7 +185,8 @@ const u32 nfs4_fattr_bitmap[3] = { | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_METADATA - | FATTR4_WORD1_TIME_MODIFY, + | FATTR4_WORD1_TIME_MODIFY + | FATTR4_WORD1_MOUNTED_ON_FILEID, #ifdef CONFIG_NFS_V4_SECURITY_LABEL FATTR4_WORD2_SECURITY_LABEL #endif @@ -293,7 +294,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent *p++ = xdr_one; /* bitmap length */ *p++ = 
htonl(FATTR4_WORD0_FILEID); /* bitmap */ *p++ = htonl(8); /* attribute buffer length */ - p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode)); + p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry))); } *p++ = xdr_one; /* next */ @@ -305,7 +306,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent *p++ = xdr_one; /* bitmap length */ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ *p++ = htonl(8); /* attribute buffer length */ - p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode)); + p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent))); readdir->pgbase = (char *)p - (char *)start; readdir->count -= readdir->pgbase; @@ -1004,7 +1005,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, gfp_t gfp_mask) { struct dentry *parent = dget_parent(dentry); - struct inode *dir = parent->d_inode; + struct inode *dir = d_inode(parent); struct nfs_server *server = NFS_SERVER(dir); struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); struct nfs4_opendata *p; @@ -1057,7 +1058,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, case NFS4_OPEN_CLAIM_FH: case NFS4_OPEN_CLAIM_DELEG_CUR_FH: case NFS4_OPEN_CLAIM_DELEG_PREV_FH: - p->o_arg.fh = NFS_FH(dentry->d_inode); + p->o_arg.fh = NFS_FH(d_inode(dentry)); } if (attrs != NULL && attrs->ia_valid != 0) { __u32 verf[2]; @@ -1794,7 +1795,7 @@ static const struct rpc_call_ops nfs4_open_confirm_ops = { */ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) { - struct nfs_server *server = NFS_SERVER(data->dir->d_inode); + struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], @@ -1951,7 +1952,7 @@ static const struct rpc_call_ops nfs4_open_ops = { static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) { - struct inode *dir = data->dir->d_inode; + struct inode *dir = d_inode(data->dir); struct nfs_server *server = NFS_SERVER(dir); struct nfs_openargs *o_arg = &data->o_arg; struct nfs_openres *o_res = &data->o_res; @@ -1998,7 +1999,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) static int _nfs4_recover_proc_open(struct nfs4_opendata *data) { - struct inode *dir = data->dir->d_inode; + struct inode *dir = d_inode(data->dir); struct nfs_openres *o_res = &data->o_res; int status; @@ -2067,7 +2068,7 @@ static int nfs4_opendata_access(struct rpc_cred *cred, */ static int _nfs4_proc_open(struct nfs4_opendata *data) { - struct inode *dir = data->dir->d_inode; + struct inode *dir = d_inode(data->dir); struct nfs_server *server = NFS_SERVER(dir); struct nfs_openargs *o_arg = &data->o_arg; struct nfs_openres *o_res = &data->o_res; @@ -2314,7 +2315,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); dentry = opendata->dentry; - if (dentry->d_inode == NULL) { + if (d_really_is_negative(dentry)) { /* FIXME: Is this d_drop() ever needed? 
*/ d_drop(dentry); dentry = d_add_unique(dentry, igrab(state->inode)); @@ -2325,7 +2326,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, ctx->dentry = dget(dentry); } nfs_set_verifier(dentry, - nfs_save_change_attribute(opendata->dir->d_inode)); + nfs_save_change_attribute(d_inode(opendata->dir))); } ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); @@ -2333,7 +2334,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, goto out; ctx->state = state; - if (dentry->d_inode == state->inode) { + if (d_inode(dentry) == state->inode) { nfs_inode_attach_open_context(ctx); if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) nfs4_schedule_stateid_recovery(server, state); @@ -2374,10 +2375,10 @@ static int _nfs4_do_open(struct inode *dir, status = nfs4_recover_expired_lease(server); if (status != 0) goto err_put_state_owner; - if (dentry->d_inode != NULL) - nfs4_return_incompatible_delegation(dentry->d_inode, fmode); + if (d_really_is_positive(dentry)) + nfs4_return_incompatible_delegation(d_inode(dentry), fmode); status = -ENOMEM; - if (dentry->d_inode) + if (d_really_is_positive(dentry)) claim = NFS4_OPEN_CLAIM_FH; opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, label, claim, GFP_KERNEL); @@ -2400,8 +2401,8 @@ static int _nfs4_do_open(struct inode *dir, } opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; } - if (dentry->d_inode != NULL) - opendata->state = nfs4_get_open_state(dentry->d_inode, sp); + if (d_really_is_positive(dentry)) + opendata->state = nfs4_get_open_state(d_inode(dentry), sp); status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); if (status != 0) @@ -3095,16 +3096,13 @@ int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info, bool auth_probe) { - int status; + int status = 0; - switch (auth_probe) { - case false: + if (!auth_probe) status = nfs4_lookup_root(server, fhandle, info); - if (status != -NFS4ERR_WRONGSEC) - break; - default: + + if (auth_probe || status == NFS4ERR_WRONGSEC) status = nfs4_do_find_root_sec(server, fhandle, info); - } if (status == 0) status = nfs4_server_capabilities(server, fhandle); @@ -3254,7 +3252,7 @@ static int nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, struct iattr *sattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct rpc_cred *cred = NULL; struct nfs4_state *state = NULL; struct nfs4_label *label = NULL; @@ -3871,13 +3869,13 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { - struct inode *dir = dentry->d_inode; + struct inode *dir = d_inode(dentry); struct nfs4_readdir_arg args = { .fh = NFS_FH(dir), .pages = pages, .pgbase = 0, .count = count, - .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, + .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, .plus = plus, }; struct nfs4_readdir_res res; @@ -3914,8 +3912,8 @@ static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, do { err = _nfs4_proc_readdir(dentry, cred, cookie, pages, count, plus); - trace_nfs4_readdir(dentry->d_inode, err); - err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), err, + trace_nfs4_readdir(d_inode(dentry), err); + err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, &exception); } while (exception.retry); return err; @@ -4830,7 +4828,7 @@ 
nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen) struct nfs4_label ilabel, *olabel = NULL; struct nfs_fattr fattr; struct rpc_cred *cred; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int status; if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) @@ -5670,7 +5668,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) data->rpc_status = task->tk_status; switch (task->tk_status) { case 0: - renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), + renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), data->timestamp); if (data->arg.new_lock) { data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); @@ -6112,7 +6110,7 @@ static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, if (strcmp(key, "") != 0) return -EINVAL; - return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); + return nfs4_proc_set_acl(d_inode(dentry), buf, buflen); } static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, @@ -6121,7 +6119,7 @@ static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, if (strcmp(key, "") != 0) return -EINVAL; - return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); + return nfs4_proc_get_acl(d_inode(dentry), buf, buflen); } static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, @@ -6130,7 +6128,7 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, { size_t len = sizeof(XATTR_NAME_NFSV4_ACL); - if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) + if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)))) return 0; if (list && len <= list_len) @@ -6158,7 +6156,7 @@ static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key, void *buf, size_t buflen, int type) { if (security_ismaclabel(key)) - return nfs4_get_security_label(dentry->d_inode, buf, buflen); + return nfs4_get_security_label(d_inode(dentry), buf, buflen); return -EOPNOTSUPP; } @@ -6168,10 +6166,10 @@ static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list, { size_t len = 0; - if (nfs_server_capable(dentry->d_inode, NFS_CAP_SECURITY_LABEL)) { - len = security_inode_listsecurity(dentry->d_inode, NULL, 0); + if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { + len = security_inode_listsecurity(d_inode(dentry), NULL, 0); if (list && len <= list_len) - security_inode_listsecurity(dentry->d_inode, list, len); + security_inode_listsecurity(d_inode(dentry), list, len); } return len; } @@ -7944,6 +7942,8 @@ _nfs4_proc_getdeviceinfo(struct nfs_server *server, { struct nfs4_getdeviceinfo_args args = { .pdev = pdev, + .notify_types = NOTIFY_DEVICEID4_CHANGE | + NOTIFY_DEVICEID4_DELETE, }; struct nfs4_getdeviceinfo_res res = { .pdev = pdev, @@ -7958,6 +7958,11 @@ _nfs4_proc_getdeviceinfo(struct nfs_server *server, dprintk("--> %s\n", __func__); status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); + if (res.notification & ~args.notify_types) + dprintk("%s: unsupported notification\n", __func__); + if (res.notification != args.notify_types) + pdev->nocache = 1; + dprintk("<-- %s status=%d\n", __func__, status); return status; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f95e3b5..2782cfc 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -42,7 +42,6 @@ #include <linux/slab.h> #include <linux/fs.h> #include <linux/nfs_fs.h> -#include <linux/nfs_idmap.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/random.h> @@ -57,6 +56,7 @@ #include 
"callback.h" #include "delegation.h" #include "internal.h" +#include "nfs4idmap.h" #include "nfs4session.h" #include "pnfs.h" #include "netns.h" @@ -1902,7 +1902,7 @@ static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred) goto out; } - inode = server->super->s_root->d_inode; + inode = d_inode(server->super->s_root); result = nfs4_proc_get_locations(inode, locations, page, cred); if (result) { dprintk("<-- %s: failed to retrieve fs_locations: %d\n", @@ -2021,7 +2021,7 @@ restart: rcu_read_unlock(); - inode = server->super->s_root->d_inode; + inode = d_inode(server->super->s_root); status = nfs4_proc_fsid_present(inode, cred); if (status != -NFS4ERR_MOVED) goto restart; /* wasn't this one */ diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 75090fe..6fb7cb6 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -3,12 +3,12 @@ */ #include <linux/init.h> #include <linux/module.h> -#include <linux/nfs_idmap.h> #include <linux/nfs4_mount.h> #include <linux/nfs_fs.h> #include "delegation.h" #include "internal.h" #include "nfs4_fs.h" +#include "nfs4idmap.h" #include "dns_resolve.h" #include "pnfs.h" #include "nfs.h" @@ -91,10 +91,11 @@ static void nfs4_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); - pnfs_return_layout(inode); - pnfs_destroy_layout(NFS_I(inode)); /* If we are holding a delegation, return it! */ nfs_inode_return_delegation_noreclaim(inode); + /* Note that above delegreturn would trigger pnfs return-on-close */ + pnfs_return_layout(inode); + pnfs_destroy_layout(NFS_I(inode)); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); } diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c index b6ebe7e..0fbd3ab 100644 --- a/fs/nfs/nfs4sysctl.c +++ b/fs/nfs/nfs4sysctl.c @@ -6,10 +6,10 @@ * Copyright (c) 2006 Trond Myklebust <Trond.Myklebust@netapp.com> */ #include <linux/sysctl.h> -#include <linux/nfs_idmap.h> #include <linux/nfs_fs.h> #include "nfs4_fs.h" +#include "nfs4idmap.h" #include "callback.h" static const int nfs_set_port_min = 0; diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 1c32adb..470af1a 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -418,7 +418,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event, __entry->fileid = 0; __entry->fhandle = 0; } - __entry->dir = NFS_FILEID(ctx->dentry->d_parent->d_inode); + __entry->dir = NFS_FILEID(d_inode(ctx->dentry->d_parent)); __assign_str(name, ctx->dentry->d_name.name); ), @@ -1110,7 +1110,7 @@ TRACE_EVENT(nfs4_layoutget, ), TP_fast_assign( - const struct inode *inode = ctx->dentry->d_inode; + const struct inode *inode = d_inode(ctx->dentry); __entry->dev = inode->i_sb->s_dev; __entry->fileid = NFS_FILEID(inode); __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 5c399ec..0aea978 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -52,10 +52,10 @@ #include <linux/nfs.h> #include <linux/nfs4.h> #include <linux/nfs_fs.h> -#include <linux/nfs_idmap.h> #include "nfs4_fs.h" #include "internal.h" +#include "nfs4idmap.h" #include "nfs4session.h" #include "pnfs.h" #include "netns.h" @@ -1920,7 +1920,7 @@ encode_getdeviceinfo(struct xdr_stream *xdr, p = reserve_space(xdr, 4 + 4); *p++ = cpu_to_be32(1); /* bitmap length */ - *p++ = cpu_to_be32(NOTIFY_DEVICEID4_CHANGE | NOTIFY_DEVICEID4_DELETE); + *p++ = cpu_to_be32(args->notify_types); } static void @@ -5753,8 +5753,9 @@ out_overflow: #if defined(CONFIG_NFS_V4_1) static int decode_getdeviceinfo(struct xdr_stream 
*xdr, - struct pnfs_device *pdev) + struct nfs4_getdeviceinfo_res *res) { + struct pnfs_device *pdev = res->pdev; __be32 *p; uint32_t len, type; int status; @@ -5802,12 +5803,7 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr, if (unlikely(!p)) goto out_overflow; - if (be32_to_cpup(p++) & - ~(NOTIFY_DEVICEID4_CHANGE | NOTIFY_DEVICEID4_DELETE)) { - dprintk("%s: unsupported notification\n", - __func__); - } - + res->notification = be32_to_cpup(p++); for (i = 1; i < len; i++) { if (be32_to_cpup(p++)) { dprintk("%s: unsupported notification\n", @@ -7061,7 +7057,7 @@ static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, status = decode_sequence(xdr, &res->seq_res, rqstp); if (status != 0) goto out; - status = decode_getdeviceinfo(xdr, res->pdev); + status = decode_getdeviceinfo(xdr, res); out: return status; } @@ -7365,6 +7361,11 @@ nfs4_stat_to_errno(int stat) .p_name = #proc, \ } +#define STUB(proc) \ +[NFSPROC4_CLNT_##proc] = { \ + .p_name = #proc, \ +} + struct rpc_procinfo nfs4_procedures[] = { PROC(READ, enc_read, dec_read), PROC(WRITE, enc_write, dec_write), @@ -7417,6 +7418,7 @@ struct rpc_procinfo nfs4_procedures[] = { PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid), + STUB(GETDEVICELIST), PROC(BIND_CONN_TO_SESSION, enc_bind_conn_to_session, dec_bind_conn_to_session), PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid), diff --git a/fs/nfs/nfstrace.c b/fs/nfs/nfstrace.c index 4eb0aea..c74f7af 100644 --- a/fs/nfs/nfstrace.c +++ b/fs/nfs/nfstrace.c @@ -7,3 +7,6 @@ #define CREATE_TRACE_POINTS #include "nfstrace.h" + +EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_enter); +EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_exit); diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 24e1d74..5aaed36 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -57,7 +57,7 @@ objio_free_deviceid_node(struct nfs4_deviceid_node *d) dprintk("%s: free od=%p\n", __func__, de->od.od); osduld_put_device(de->od.od); - kfree(de); + kfree_rcu(d, rcu); } struct objio_segment { @@ -637,6 +637,8 @@ static struct pnfs_layoutdriver_type objlayout_type = { .pg_read_ops = &objio_pg_read_ops, .pg_write_ops = &objio_pg_write_ops, + .sync = pnfs_generic_sync, + .free_deviceid_node = objio_free_deviceid_node, .encode_layoutcommit = objlayout_encode_layoutcommit, diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index d57190a..282b393 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -938,7 +938,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev, if (prev) { if (!nfs_match_open_context(req->wb_context, prev->wb_context)) return false; - flctx = req->wb_context->dentry->d_inode->i_flctx; + flctx = d_inode(req->wb_context->dentry)->i_flctx; if (flctx != NULL && !(list_empty_careful(&flctx->flc_posix) && list_empty_careful(&flctx->flc_flock)) && diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 4f802b0..2306062 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1090,6 +1090,7 @@ bool pnfs_roc(struct inode *ino) pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */ spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); + pnfs_layoutcommit_inode(ino, true); return true; out_noroc: @@ -1104,8 +1105,10 @@ out_noroc: } } spin_unlock(&ino->i_lock); - if (layoutreturn) + if (layoutreturn) { + pnfs_layoutcommit_inode(ino, true); pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); + } return false; } @@ -1841,7 
+1844,8 @@ void pnfs_ld_write_done(struct nfs_pgio_header *hdr) { trace_nfs4_pnfs_write(hdr, hdr->pnfs_error); if (!hdr->pnfs_error) { - pnfs_set_layoutcommit(hdr); + pnfs_set_layoutcommit(hdr->inode, hdr->lseg, + hdr->mds_offset + hdr->res.count); hdr->mds_ops->rpc_call_done(&hdr->task, hdr); } else pnfs_ld_handle_write_error(hdr); @@ -1902,7 +1906,6 @@ static void pnfs_writehdr_free(struct nfs_pgio_header *hdr) pnfs_put_lseg(hdr->lseg); nfs_pgio_header_free(hdr); } -EXPORT_SYMBOL_GPL(pnfs_writehdr_free); int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) @@ -2032,7 +2035,6 @@ static void pnfs_readhdr_free(struct nfs_pgio_header *hdr) pnfs_put_lseg(hdr->lseg); nfs_pgio_header_free(hdr); } -EXPORT_SYMBOL_GPL(pnfs_readhdr_free); int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) @@ -2099,64 +2101,34 @@ void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) EXPORT_SYMBOL_GPL(pnfs_set_lo_fail); void -pnfs_set_layoutcommit(struct nfs_pgio_header *hdr) +pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg, + loff_t end_pos) { - struct inode *inode = hdr->inode; struct nfs_inode *nfsi = NFS_I(inode); - loff_t end_pos = hdr->mds_offset + hdr->res.count; bool mark_as_dirty = false; spin_lock(&inode->i_lock); if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { - mark_as_dirty = true; - dprintk("%s: Set layoutcommit for inode %lu ", - __func__, inode->i_ino); - } - if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) { - /* references matched in nfs4_layoutcommit_release */ - pnfs_get_lseg(hdr->lseg); - } - if (end_pos > nfsi->layout->plh_lwb) nfsi->layout->plh_lwb = end_pos; - spin_unlock(&inode->i_lock); - dprintk("%s: lseg %p end_pos %llu\n", - __func__, hdr->lseg, nfsi->layout->plh_lwb); - - /* if pnfs_layoutcommit_inode() runs between inode locks, the next one - * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */ - if (mark_as_dirty) - mark_inode_dirty_sync(inode); -} -EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); - -void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data) -{ - struct inode *inode = data->inode; - struct nfs_inode *nfsi = NFS_I(inode); - bool mark_as_dirty = false; - - spin_lock(&inode->i_lock); - if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { mark_as_dirty = true; dprintk("%s: Set layoutcommit for inode %lu ", __func__, inode->i_ino); - } - if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) { + } else if (end_pos > nfsi->layout->plh_lwb) + nfsi->layout->plh_lwb = end_pos; + if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) { /* references matched in nfs4_layoutcommit_release */ - pnfs_get_lseg(data->lseg); + pnfs_get_lseg(lseg); } - if (data->lwb > nfsi->layout->plh_lwb) - nfsi->layout->plh_lwb = data->lwb; spin_unlock(&inode->i_lock); dprintk("%s: lseg %p end_pos %llu\n", - __func__, data->lseg, nfsi->layout->plh_lwb); + __func__, lseg, nfsi->layout->plh_lwb); /* if pnfs_layoutcommit_inode() runs between inode locks, the next one * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */ if (mark_as_dirty) mark_inode_dirty_sync(inode); } -EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit); +EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data) { @@ -2216,7 +2188,6 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) pnfs_list_write_lseg(inode, &data->lseg_list); end_pos = nfsi->layout->plh_lwb; - nfsi->layout->plh_lwb = 0; nfs4_stateid_copy(&data->args.stateid, 
&nfsi->layout->plh_stateid); spin_unlock(&inode->i_lock); @@ -2233,11 +2204,11 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) status = ld->prepare_layoutcommit(&data->args); if (status) { spin_lock(&inode->i_lock); - if (end_pos < nfsi->layout->plh_lwb) + set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); + if (end_pos > nfsi->layout->plh_lwb) nfsi->layout->plh_lwb = end_pos; spin_unlock(&inode->i_lock); put_rpccred(data->cred); - set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); goto clear_layoutcommitting; } } @@ -2258,6 +2229,13 @@ clear_layoutcommitting: } EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode); +int +pnfs_generic_sync(struct inode *inode, bool datasync) +{ + return pnfs_layoutcommit_inode(inode, true); +} +EXPORT_SYMBOL_GPL(pnfs_generic_sync); + struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) { struct nfs4_threshold *thp; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 635f086..1e6308f 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -155,6 +155,8 @@ struct pnfs_layoutdriver_type { int how, struct nfs_commit_info *cinfo); + int (*sync)(struct inode *inode, bool datasync); + /* * Return PNFS_ATTEMPTED to indicate the layout code has attempted * I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS @@ -203,6 +205,7 @@ struct pnfs_device { struct page **pages; unsigned int pgbase; unsigned int pglen; /* reply buffer length */ + unsigned char nocache : 1;/* May not be cached */ }; #define NFS4_PNFS_GETDEVLIST_MAXNUM 16 @@ -263,10 +266,11 @@ bool pnfs_roc(struct inode *ino); void pnfs_roc_release(struct inode *ino); void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task); -void pnfs_set_layoutcommit(struct nfs_pgio_header *); -void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data); +void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t); void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); int pnfs_layoutcommit_inode(struct inode *inode, bool sync); +int pnfs_generic_sync(struct inode *inode, bool datasync); +int pnfs_nfs_generic_sync(struct inode *inode, bool datasync); int _pnfs_return_layout(struct inode *); int pnfs_commit_and_return_layout(struct inode *); void pnfs_ld_write_done(struct nfs_pgio_header *); @@ -291,6 +295,7 @@ void pnfs_error_mark_layout_for_return(struct inode *inode, enum { NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */ NFS_DEVICEID_UNAVAILABLE, /* device temporarily unavailable */ + NFS_DEVICEID_NOCACHE, /* device may not be cached */ }; /* pnfs_dev.c */ @@ -302,6 +307,7 @@ struct nfs4_deviceid_node { unsigned long flags; unsigned long timestamp_unavailable; struct nfs4_deviceid deviceid; + struct rcu_head rcu; atomic_t ref; }; @@ -426,7 +432,7 @@ static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo, u32 ds_commit_idx) { - struct inode *inode = req->wb_context->dentry->d_inode; + struct inode *inode = d_inode(req->wb_context->dentry); struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; if (lseg == NULL || ld->mark_request_commit == NULL) @@ -438,7 +444,7 @@ pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, static inline bool pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo) { - struct inode *inode = req->wb_context->dentry->d_inode; + struct inode *inode = d_inode(req->wb_context->dentry); struct pnfs_layoutdriver_type *ld = 
NFS_SERVER(inode)->pnfs_curr_ld; if (ld == NULL || ld->clear_request_commit == NULL) @@ -486,6 +492,14 @@ pnfs_ld_read_whole_page(struct inode *inode) return NFS_SERVER(inode)->pnfs_curr_ld->flags & PNFS_READ_WHOLE_PAGE; } +static inline int +pnfs_sync_inode(struct inode *inode, bool datasync) +{ + if (!pnfs_enabled_sb(NFS_SERVER(inode))) + return 0; + return NFS_SERVER(inode)->pnfs_curr_ld->sync(inode, datasync); +} + static inline bool pnfs_layoutcommit_outstanding(struct inode *inode) { @@ -568,6 +582,12 @@ pnfs_ld_read_whole_page(struct inode *inode) return false; } +static inline int +pnfs_sync_inode(struct inode *inode, bool datasync) +{ + return 0; +} + static inline bool pnfs_roc(struct inode *ino) { diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c index aa2ec00..2961fcd 100644 --- a/fs/nfs/pnfs_dev.c +++ b/fs/nfs/pnfs_dev.c @@ -149,6 +149,8 @@ nfs4_get_device_info(struct nfs_server *server, */ d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev, gfp_flags); + if (d && pdev->nocache) + set_bit(NFS_DEVICEID_NOCACHE, &d->flags); out_free_pages: for (i = 0; i < max_pages; i++) @@ -175,8 +177,8 @@ __nfs4_find_get_deviceid(struct nfs_server *server, rcu_read_lock(); d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id, hash); - if (d != NULL) - atomic_inc(&d->ref); + if (d != NULL && !atomic_inc_not_zero(&d->ref)) + d = NULL; rcu_read_unlock(); return d; } @@ -235,12 +237,11 @@ nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld, return; } hlist_del_init_rcu(&d->node); + clear_bit(NFS_DEVICEID_NOCACHE, &d->flags); spin_unlock(&nfs4_deviceid_lock); - synchronize_rcu(); /* balance the initial ref set in pnfs_insert_deviceid */ - if (atomic_dec_and_test(&d->ref)) - d->ld->free_deviceid_node(d); + nfs4_put_deviceid_node(d); } EXPORT_SYMBOL_GPL(nfs4_delete_deviceid); @@ -271,6 +272,11 @@ EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node); bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *d) { + if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) { + if (atomic_add_unless(&d->ref, -1, 2)) + return false; + nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid); + } if (!atomic_dec_and_test(&d->ref)) return false; d->ld->free_deviceid_node(d); @@ -314,6 +320,7 @@ _deviceid_purge_client(const struct nfs_client *clp, long hash) if (d->nfs_client == clp && atomic_read(&d->ref)) { hlist_del_init_rcu(&d->node); hlist_add_head(&d->tmpnode, &tmp); + clear_bit(NFS_DEVICEID_NOCACHE, &d->flags); } rcu_read_unlock(); spin_unlock(&nfs4_deviceid_lock); @@ -321,12 +328,10 @@ _deviceid_purge_client(const struct nfs_client *clp, long hash) if (hlist_empty(&tmp)) return; - synchronize_rcu(); while (!hlist_empty(&tmp)) { d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode); hlist_del(&d->tmpnode); - if (atomic_dec_and_test(&d->ref)) - d->ld->free_deviceid_node(d); + nfs4_put_deviceid_node(d); } } diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 54e36b3..f37e25b 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -561,7 +561,7 @@ static bool load_v3_ds_connect(void) return(get_v3_ds_connect != NULL); } -void __exit nfs4_pnfs_v3_ds_connect_unload(void) +void nfs4_pnfs_v3_ds_connect_unload(void) { if (get_v3_ds_connect) { symbol_put(nfs3_set_ds_client); @@ -868,3 +868,13 @@ pnfs_layout_mark_request_commit(struct nfs_page *req, nfs_request_add_commit_list(req, list, cinfo); } EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit); + +int +pnfs_nfs_generic_sync(struct inode *inode, bool datasync) +{ + if (datasync) + return 0; + return pnfs_layoutcommit_inode(inode, 
true); +} +EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync); + diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index c63189a..b417bbc 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -118,7 +118,7 @@ static int nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, struct iattr *sattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct nfs_sattrargs arg = { .fh = NFS_FH(inode), .sattr = sattr @@ -487,7 +487,7 @@ static int nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { - struct inode *dir = dentry->d_inode; + struct inode *dir = d_inode(dentry); struct nfs_readdirargs arg = { .fh = NFS_FH(dir), .cookie = cookie, diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 568ecf0..ae0ff7a 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -117,15 +117,15 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, static void nfs_readpage_release(struct nfs_page *req) { - struct inode *d_inode = req->wb_context->dentry->d_inode; + struct inode *inode = d_inode(req->wb_context->dentry); - dprintk("NFS: read done (%s/%llu %d@%lld)\n", d_inode->i_sb->s_id, - (unsigned long long)NFS_FILEID(d_inode), req->wb_bytes, + dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(inode), req->wb_bytes, (long long)req_offset(req)); if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { if (PageUptodate(req->wb_page)) - nfs_readpage_to_fscache(d_inode, req->wb_page, 0); + nfs_readpage_to_fscache(inode, req->wb_page, 0); unlock_page(req->wb_page); } @@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page) dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", page, PAGE_CACHE_SIZE, page_file_index(page)); nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); - nfs_inc_stats(inode, NFSIOS_READPAGES); + nfs_add_stats(inode, NFSIOS_READPAGES, 1); /* * Try to flush any pending writes to the file.. 
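The fs/nfs/pnfs_dev.c hunks above make deviceid cache lookups safe against concurrent teardown: a lookup only takes a reference via atomic_inc_not_zero() under rcu_read_lock(), and the final put frees through the layout driver (kfree_rcu in objio_osd). A minimal sketch of that lookup idiom, using a hypothetical example_node type rather than the real nfs4_deviceid_node:

#include <linux/atomic.h>
#include <linux/rculist.h>

struct example_node {			/* hypothetical stand-in for nfs4_deviceid_node */
	struct hlist_node node;
	atomic_t ref;
};

static struct example_node *example_find_get(struct hlist_head *bucket)
{
	struct example_node *d;

	rcu_read_lock();
	hlist_for_each_entry_rcu(d, bucket, node) {
		/* refuse 0 -> 1 transitions: a node whose ref hit zero is being freed */
		if (atomic_inc_not_zero(&d->ref)) {
			rcu_read_unlock();
			return d;
		}
	}
	rcu_read_unlock();
	return NULL;
}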
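Most of the mechanical churn in the NFS client hunks above replaces open-coded dentry->d_inode accesses with the dcache accessor helpers. A minimal sketch of the replacement pattern, assuming only the stock helpers from <linux/dcache.h>; the example function itself is illustrative:

#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/fs.h>

static int example_dentry_check(struct dentry *dentry)
{
	struct inode *inode;

	if (d_really_is_negative(dentry))	/* was: dentry->d_inode == NULL */
		return -ENOENT;

	inode = d_inode(dentry);		/* was: dentry->d_inode */
	return S_ISDIR(inode->i_mode) ? 0 : -ENOTDIR;
}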
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 322b2de02..f175b83 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -43,7 +43,6 @@ #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/namei.h> -#include <linux/nfs_idmap.h> #include <linux/vfs.h> #include <linux/inet.h> #include <linux/in6.h> @@ -433,7 +432,7 @@ int nfs_statfs(struct dentry *dentry, struct kstatfs *buf) struct nfs_server *server = NFS_SB(dentry->d_sb); unsigned char blockbits; unsigned long blockres; - struct nfs_fh *fh = NFS_FH(dentry->d_inode); + struct nfs_fh *fh = NFS_FH(d_inode(dentry)); struct nfs_fsstat res; int error = -ENOMEM; @@ -447,7 +446,7 @@ int nfs_statfs(struct dentry *dentry, struct kstatfs *buf) pd_dentry = dget_parent(dentry); if (pd_dentry != NULL) { - nfs_zap_caches(pd_dentry->d_inode); + nfs_zap_caches(d_inode(pd_dentry)); dput(pd_dentry); } } @@ -2193,7 +2192,7 @@ nfs_compare_remount_data(struct nfs_server *nfss, data->version != nfss->nfs_client->rpc_ops->version || data->minorversion != nfss->nfs_client->cl_minorversion || data->retrans != nfss->client->cl_timeout->to_retries || - data->selected_flavor != nfss->client->cl_auth->au_flavor || + !nfs_auth_info_match(&data->auth_info, nfss->client->cl_auth->au_flavor) || data->acregmin != nfss->acregmin / HZ || data->acregmax != nfss->acregmax / HZ || data->acdirmin != nfss->acdirmin / HZ || @@ -2241,7 +2240,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data) data->wsize = nfss->wsize; data->retrans = nfss->client->cl_timeout->to_retries; data->selected_flavor = nfss->client->cl_auth->au_flavor; - data->auth_info = nfss->auth_info; data->acregmin = nfss->acregmin / HZ; data->acregmax = nfss->acregmax / HZ; data->acdirmin = nfss->acdirmin / HZ; @@ -2526,7 +2524,7 @@ int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot, struct nfs_mount_info *mount_info) { /* clone any lsm security options from the parent to the new sb */ - if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) + if (d_inode(mntroot)->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) return -ESTALE; return security_sb_clone_mnt_opts(mount_info->cloned->sb, s); } diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index 05c9e02..2d56200 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c @@ -45,7 +45,7 @@ error: static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct page *page; void *err; diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index de54129..fa538b2 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -143,7 +143,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n nfs_free_dname(data); ret = nfs_copy_dname(alias, data); spin_lock(&alias->d_lock); - if (ret == 0 && alias->d_inode != NULL && + if (ret == 0 && d_really_is_positive(alias) && !(alias->d_flags & DCACHE_NFSFS_RENAMED)) { devname_garbage = alias->d_fsdata; alias->d_fsdata = data; @@ -190,7 +190,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data) parent = dget_parent(dentry); if (parent == NULL) goto out_free; - dir = parent->d_inode; + dir = d_inode(parent); /* Non-exclusive lock protects against concurrent lookup() calls */ spin_lock(&dir->i_lock); if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) { @@ -210,21 +210,21 @@ out_free: void nfs_wait_on_sillyrename(struct dentry *dentry) { - struct nfs_inode *nfsi = NFS_I(dentry->d_inode); + struct nfs_inode *nfsi = 
NFS_I(d_inode(dentry)); wait_event(nfsi->waitqueue, atomic_read(&nfsi->silly_count) <= 1); } void nfs_block_sillyrename(struct dentry *dentry) { - struct nfs_inode *nfsi = NFS_I(dentry->d_inode); + struct nfs_inode *nfsi = NFS_I(d_inode(dentry)); wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1); } void nfs_unblock_sillyrename(struct dentry *dentry) { - struct inode *dir = dentry->d_inode; + struct inode *dir = d_inode(dentry); struct nfs_inode *nfsi = NFS_I(dir); struct nfs_unlinkdata *data; @@ -367,8 +367,8 @@ static void nfs_async_rename_release(void *calldata) struct nfs_renamedata *data = calldata; struct super_block *sb = data->old_dir->i_sb; - if (data->old_dentry->d_inode) - nfs_mark_for_revalidate(data->old_dentry->d_inode); + if (d_really_is_positive(data->old_dentry)) + nfs_mark_for_revalidate(d_inode(data->old_dentry)); dput(data->old_dentry); dput(data->new_dentry); @@ -529,10 +529,10 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry) if (dentry->d_flags & DCACHE_NFSFS_RENAMED) goto out; - fileid = NFS_FILEID(dentry->d_inode); + fileid = NFS_FILEID(d_inode(dentry)); /* Return delegation in anticipation of the rename */ - NFS_PROTO(dentry->d_inode)->return_delegation(dentry->d_inode); + NFS_PROTO(d_inode(dentry))->return_delegation(d_inode(dentry)); sdentry = NULL; do { @@ -554,7 +554,7 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry) */ if (IS_ERR(sdentry)) goto out; - } while (sdentry->d_inode != NULL); /* need negative lookup */ + } while (d_inode(sdentry) != NULL); /* need negative lookup */ /* queue unlink first. Can't do this from rpc_release as it * has to allocate memory diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 849ed78..d12a4be 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st int ret; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); - nfs_inc_stats(inode, NFSIOS_WRITEPAGES); + nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); nfs_pageio_cond_complete(pgio, page_file_index(page)); ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); @@ -702,7 +702,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) */ static void nfs_inode_remove_request(struct nfs_page *req) { - struct inode *inode = req->wb_context->dentry->d_inode; + struct inode *inode = d_inode(req->wb_context->dentry); struct nfs_inode *nfsi = NFS_I(inode); struct nfs_page *head; @@ -861,7 +861,7 @@ static void nfs_clear_request_commit(struct nfs_page *req) { if (test_bit(PG_CLEAN, &req->wb_flags)) { - struct inode *inode = req->wb_context->dentry->d_inode; + struct inode *inode = d_inode(req->wb_context->dentry); struct nfs_commit_info cinfo; nfs_init_cinfo_from_inode(&cinfo, inode); @@ -1591,7 +1591,7 @@ void nfs_init_commit(struct nfs_commit_data *data, struct nfs_commit_info *cinfo) { struct nfs_page *first = nfs_list_entry(head->next); - struct inode *inode = first->wb_context->dentry->d_inode; + struct inode *inode = d_inode(first->wb_context->dentry); /* Set up the RPC argument and reply structs * NB: take care not to mess about with data->commit et al. 
*/ @@ -1690,7 +1690,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) dprintk("NFS: commit (%s/%llu %d@%lld)", req->wb_context->dentry->d_sb->s_id, - (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode), + (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)), req->wb_bytes, (long long)req_offset(req)); if (status < 0) { @@ -1840,17 +1840,16 @@ EXPORT_SYMBOL_GPL(nfs_write_inode); */ int nfs_wb_all(struct inode *inode) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = LONG_MAX, - .range_start = 0, - .range_end = LLONG_MAX, - }; int ret; trace_nfs_writeback_inode_enter(inode); - ret = sync_inode(inode, &wbc); + ret = filemap_write_and_wait(inode->i_mapping); + if (!ret) { + ret = nfs_commit_inode(inode, FLUSH_SYNC); + if (!ret) + pnfs_sync_inode(inode, true); + } trace_nfs_writeback_inode_exit(inode, ret); return ret; @@ -1876,11 +1875,6 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) * request from the inode / page_private pointer and * release it */ nfs_inode_remove_request(req); - /* - * In case nfs_inode_remove_request has marked the - * page as being dirty - */ - cancel_dirty_page(page, PAGE_CACHE_SIZE); nfs_unlock_and_release_request(req); } diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 683bf71..a0b77fc 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -6,6 +6,7 @@ config NFSD select SUNRPC select EXPORTFS select NFS_ACL_SUPPORT if NFSD_V2_ACL + depends on MULTIUSER help Choose Y here if you want to allow other computers to access files residing on this system using Sun's Network File System @@ -107,7 +108,7 @@ config NFSD_V4_SECURITY_LABEL config NFSD_FAULT_INJECTION bool "NFS server manual fault injection" - depends on NFSD_V4 && DEBUG_KERNEL + depends on NFSD_V4 && DEBUG_KERNEL && DEBUG_FS help This option enables support for manually injecting faults into the NFS server. 
This is intended to be used for diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index cdbc78c..03d647b 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -137,7 +137,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, seg->offset = iomap.offset; seg->length = iomap.length; - dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es); + dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es); return 0; out_error: diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c index 9da89fd..9aa2796 100644 --- a/fs/nfsd/blocklayoutxdr.c +++ b/fs/nfsd/blocklayoutxdr.c @@ -122,19 +122,19 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, p = xdr_decode_hyper(p, &bex.foff); if (bex.foff & (block_size - 1)) { - dprintk("%s: unaligned offset %lld\n", + dprintk("%s: unaligned offset 0x%llx\n", __func__, bex.foff); goto fail; } p = xdr_decode_hyper(p, &bex.len); if (bex.len & (block_size - 1)) { - dprintk("%s: unaligned length %lld\n", + dprintk("%s: unaligned length 0x%llx\n", __func__, bex.foff); goto fail; } p = xdr_decode_hyper(p, &bex.soff); if (bex.soff & (block_size - 1)) { - dprintk("%s: unaligned disk offset %lld\n", + dprintk("%s: unaligned disk offset 0x%llx\n", __func__, bex.soff); goto fail; } diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index c3e3b6e..f79521a 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -599,7 +599,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) goto out4; } - err = check_export(exp.ex_path.dentry->d_inode, &exp.ex_flags, + err = check_export(d_inode(exp.ex_path.dentry), &exp.ex_flags, exp.ex_uuid); if (err) goto out4; @@ -691,8 +691,7 @@ static int svc_export_match(struct cache_head *a, struct cache_head *b) struct svc_export *orig = container_of(a, struct svc_export, h); struct svc_export *new = container_of(b, struct svc_export, h); return orig->ex_client == new->ex_client && - orig->ex_path.dentry == new->ex_path.dentry && - orig->ex_path.mnt == new->ex_path.mnt; + path_equal(&orig->ex_path, &new->ex_path); } static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) @@ -891,7 +890,7 @@ exp_rootfh(struct net *net, struct auth_domain *clp, char *name, printk("nfsd: exp_rootfh path not found %s", name); return err; } - inode = path.dentry->d_inode; + inode = d_inode(path.dentry); dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n", name, path.dentry, clp->name, @@ -1159,6 +1158,7 @@ static struct flags { { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}}, { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, { NFSEXP_V4ROOT, {"v4root", ""}}, + { NFSEXP_PNFS, {"pnfs", ""}}, { 0, {"", ""}} }; diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index ac54ea6..d54701f 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -42,7 +42,7 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp, if (nfserr) RETURN_STATUS(nfserr); - inode = fh->fh_dentry->d_inode; + inode = d_inode(fh->fh_dentry); if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) RETURN_STATUS(nfserr_inval); @@ -103,7 +103,7 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp, if (nfserr) goto out; - inode = fh->fh_dentry->d_inode; + inode = d_inode(fh->fh_dentry); if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) { error = -EOPNOTSUPP; goto out_errno; @@ -266,9 +266,9 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p, * nfsd_dispatch actually ensures the following cannot happen. 
* However, it seems fragile to depend on that. */ - if (dentry == NULL || dentry->d_inode == NULL) + if (dentry == NULL || d_really_is_negative(dentry)) return 0; - inode = dentry->d_inode; + inode = d_inode(dentry); p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat); *p++ = htonl(resp->mask); diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index 34cbbab..882b1a1 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -39,7 +39,7 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp, if (nfserr) RETURN_STATUS(nfserr); - inode = fh->fh_dentry->d_inode; + inode = d_inode(fh->fh_dentry); if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) RETURN_STATUS(nfserr_inval); @@ -94,7 +94,7 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp, if (nfserr) goto out; - inode = fh->fh_dentry->d_inode; + inode = d_inode(fh->fh_dentry); if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) { error = -EOPNOTSUPP; goto out_errno; @@ -174,8 +174,8 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p, struct dentry *dentry = resp->fh.fh_dentry; p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh); - if (resp->status == 0 && dentry && dentry->d_inode) { - struct inode *inode = dentry->d_inode; + if (resp->status == 0 && dentry && d_really_is_positive(dentry)) { + struct inode *inode = d_inode(dentry); struct kvec *head = rqstp->rq_res.head; unsigned int base; int n; diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 12f2aab..7b755b7 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -166,7 +166,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, rqstp->rq_vec, argp->vlen, &resp->count); if (nfserr == 0) { - struct inode *inode = resp->fh.fh_dentry->d_inode; + struct inode *inode = d_inode(resp->fh.fh_dentry); resp->eof = (argp->offset + resp->count) >= inode->i_size; } @@ -551,7 +551,7 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, * different read/write sizes for file systems known to have * problems with large blocks */ if (nfserr == 0) { - struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb; + struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb; /* Note that we don't care for remote fs's here */ if (sb->s_magic == MSDOS_SUPER_MAGIC) { @@ -587,7 +587,7 @@ nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, nfserr = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP); if (nfserr == 0) { - struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb; + struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb; /* Note that we don't care for remote fs's here */ switch (sb->s_magic) { diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 39c5eb3..e4b2b43 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -146,7 +146,7 @@ static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp) default: case FSIDSOURCE_DEV: p = xdr_encode_hyper(p, (u64)huge_encode_dev - (fhp->fh_dentry->d_inode->i_sb->s_dev)); + (d_inode(fhp->fh_dentry)->i_sb->s_dev)); break; case FSIDSOURCE_FSID: p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid); @@ -203,14 +203,14 @@ static __be32 * encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; - if (dentry && dentry->d_inode) { + if (dentry && d_really_is_positive(dentry)) { __be32 err; struct kstat stat; err = fh_getattr(fhp, &stat); if (!err) { *p++ = xdr_one; /* attributes follow */ - lease_get_mtime(dentry->d_inode, &stat.mtime); + lease_get_mtime(d_inode(dentry), &stat.mtime); 
return encode_fattr3(rqstp, p, fhp, &stat); } } @@ -233,7 +233,7 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; - if (dentry && dentry->d_inode && fhp->fh_post_saved) { + if (dentry && d_really_is_positive(dentry) && fhp->fh_post_saved) { if (fhp->fh_pre_saved) { *p++ = xdr_one; p = xdr_encode_hyper(p, (u64) fhp->fh_pre_size); @@ -260,11 +260,11 @@ void fill_post_wcc(struct svc_fh *fhp) printk("nfsd: inode locked twice during operation.\n"); err = fh_getattr(fhp, &fhp->fh_post_attr); - fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version; + fhp->fh_post_change = d_inode(fhp->fh_dentry)->i_version; if (err) { fhp->fh_post_saved = 0; /* Grab the ctime anyway - set_change_info might use it */ - fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime; + fhp->fh_post_attr.ctime = d_inode(fhp->fh_dentry)->i_ctime; } else fhp->fh_post_saved = 1; } @@ -628,7 +628,7 @@ nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { if (resp->status == 0) { - lease_get_mtime(resp->fh.fh_dentry->d_inode, + lease_get_mtime(d_inode(resp->fh.fh_dentry), &resp->stat.mtime); p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat); } @@ -828,7 +828,7 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, return rv; if (d_mountpoint(dchild)) goto out; - if (!dchild->d_inode) + if (d_really_is_negative(dchild)) goto out; rv = fh_compose(fhp, exp, dchild, &cd->fh); out: diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index 59fd766..67242bf 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -139,7 +139,7 @@ int nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error = 0; struct posix_acl *pacl = NULL, *dpacl = NULL; unsigned int flags = 0; @@ -499,43 +499,13 @@ static inline void add_to_mask(struct posix_acl_state *state, struct posix_ace_s state->mask.allow |= astate->allow; } -/* - * Certain bits (SYNCHRONIZE, DELETE, WRITE_OWNER, READ/WRITE_NAMED_ATTRS, - * READ_ATTRIBUTES, READ_ACL) are currently unenforceable and don't translate - * to traditional read/write/execute permissions. - * - * It's problematic to reject acls that use certain mode bits, because it - * places the burden on users to learn the rules about which bits one - * particular server sets, without giving the user a lot of help--we return an - * error that could mean any number of different things. To make matters - * worse, the problematic bits might be introduced by some application that's - * automatically mapping from some other acl model. - * - * So wherever possible we accept anything, possibly erring on the side of - * denying more permissions than necessary. 
- * - * However we do reject *explicit* DENY's of a few bits representing - * permissions we could never deny: - */ - -static inline int check_deny(u32 mask, int isowner) -{ - if (mask & (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL)) - return -EINVAL; - if (!isowner) - return 0; - if (mask & (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL)) - return -EINVAL; - return 0; -} - static struct posix_acl * posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) { struct posix_acl_entry *pace; struct posix_acl *pacl; int nace; - int i, error = 0; + int i; /* * ACLs with no ACEs are treated differently in the inheritable @@ -560,17 +530,11 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) pace = pacl->a_entries; pace->e_tag = ACL_USER_OBJ; - error = check_deny(state->owner.deny, 1); - if (error) - goto out_err; low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags); for (i=0; i < state->users->n; i++) { pace++; pace->e_tag = ACL_USER; - error = check_deny(state->users->aces[i].perms.deny, 0); - if (error) - goto out_err; low_mode_from_nfs4(state->users->aces[i].perms.allow, &pace->e_perm, flags); pace->e_uid = state->users->aces[i].uid; @@ -579,18 +543,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) pace++; pace->e_tag = ACL_GROUP_OBJ; - error = check_deny(state->group.deny, 0); - if (error) - goto out_err; low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags); add_to_mask(state, &state->group); for (i=0; i < state->groups->n; i++) { pace++; pace->e_tag = ACL_GROUP; - error = check_deny(state->groups->aces[i].perms.deny, 0); - if (error) - goto out_err; low_mode_from_nfs4(state->groups->aces[i].perms.allow, &pace->e_perm, flags); pace->e_gid = state->groups->aces[i].gid; @@ -605,15 +563,9 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) pace++; pace->e_tag = ACL_OTHER; - error = check_deny(state->other.deny, 0); - if (error) - goto out_err; low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags); return pacl; -out_err: - posix_acl_release(pacl); - return ERR_PTR(error); } static inline void allow_bits(struct posix_ace_state *astate, u32 mask) @@ -828,7 +780,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp, return error; dentry = fhp->fh_dentry; - inode = dentry->d_inode; + inode = d_inode(dentry); if (!inode->i_op->set_acl || !IS_POSIXACL(inode)) return nfserr_attrnotsupp; diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 1028a06..6904213 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -118,7 +118,7 @@ void nfsd4_setup_layout_type(struct svc_export *exp) { struct super_block *sb = exp->ex_path.mnt->mnt_sb; - if (exp->ex_flags & NFSEXP_NOPNFS) + if (!(exp->ex_flags & NFSEXP_PNFS)) return; if (sb->s_export_op->get_uuid && @@ -440,15 +440,14 @@ nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg, list_move_tail(&lp->lo_perstate, reaplist); return; } - end = seg->offset; + lo->offset = layout_end(seg); } else { /* retain the whole layout segment on a split. 
*/ if (layout_end(seg) < end) { dprintk("%s: split not supported\n", __func__); return; } - - lo->offset = layout_end(seg); + end = seg->offset; } layout_update_len(lo, end); @@ -513,6 +512,9 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp, spin_lock(&clp->cl_lock); list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { + if (ls->ls_layout_type != lrp->lr_layout_type) + continue; + if (lrp->lr_return_type == RETURN_FSID && !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, &cstate->current_fh.fh_handle)) @@ -587,6 +589,8 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); + trace_layout_recall_fail(&ls->ls_stid.sc_stateid); + printk(KERN_WARNING "nfsd: client %s failed to respond to layout recall. " " Fencing..\n", addr_str); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index d30bea8d..864e200 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -52,7 +52,7 @@ static inline void nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval) { - struct inode *inode = resfh->fh_dentry->d_inode; + struct inode *inode = d_inode(resfh->fh_dentry); int status; mutex_lock(&inode->i_mutex); @@ -110,7 +110,7 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * in current environment or not. */ if (bmval[0] & FATTR4_WORD0_ACL) { - if (!IS_POSIXACL(dentry->d_inode)) + if (!IS_POSIXACL(d_inode(dentry))) return nfserr_attrnotsupp; } @@ -209,7 +209,7 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs static __be32 nfsd_check_obj_isreg(struct svc_fh *fh) { - umode_t mode = fh->fh_dentry->d_inode->i_mode; + umode_t mode = d_inode(fh->fh_dentry)->i_mode; if (S_ISREG(mode)) return nfs_ok; @@ -470,7 +470,7 @@ out: fh_put(resfh); kfree(resfh); } - nfsd4_cleanup_open_state(cstate, open, status); + nfsd4_cleanup_open_state(cstate, open); nfsd4_bump_seqid(cstate, status); return status; } @@ -881,7 +881,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, &exp, &dentry); if (err) return err; - if (dentry->d_inode == NULL) { + if (d_really_is_negative(dentry)) { exp_put(exp); err = nfserr_noent; } else @@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n"); return status; } + if (!file) + return nfserr_bad_stateid; status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file, fallocate->falloc_offset, @@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n"); return status; } + if (!file) + return nfserr_bad_stateid; switch (seek->seek_whence) { case NFS4_CONTENT_DATA: @@ -1237,8 +1241,8 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp, nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); gdp->gd_notify_types &= ops->notify_types; - exp_put(exp); out: + exp_put(exp); return nfserr; } @@ -1308,7 +1312,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp, if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls)) goto out_put_stid; - nfserr = ops->proc_layoutget(current_fh->fh_dentry->d_inode, + nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry), current_fh, lgp); if (nfserr) goto out_put_stid; @@ -1342,7 +1346,7 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type); if (!ops) goto out; - inode = current_fh->fh_dentry->d_inode; + 
inode = d_inode(current_fh->fh_dentry); nfserr = nfserr_inval; if (new_size <= seg->offset) { @@ -1815,7 +1819,7 @@ static inline u32 nfsd4_getattr_rsize(struct svc_rqst *rqstp, bmap0 &= ~FATTR4_WORD0_FILEHANDLE; } if (bmap2 & FATTR4_WORD2_SECURITY_LABEL) { - ret += NFSD4_MAX_SEC_LABEL_LEN + 12; + ret += NFS4_MAXLABELLEN + 12; bmap2 &= ~FATTR4_WORD2_SECURITY_LABEL; } /* @@ -2282,13 +2286,13 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_func = (nfsd4op_func)nfsd4_allocate, .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, .op_name = "OP_ALLOCATE", - .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize, + .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, }, [OP_DEALLOCATE] = { .op_func = (nfsd4op_func)nfsd4_deallocate, .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, .op_name = "OP_DEALLOCATE", - .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize, + .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, }, [OP_SEEK] = { .op_func = (nfsd4op_func)nfsd4_seek, diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 1c307f0..d88ea7b 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -192,14 +192,14 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) dir = nn->rec_file->f_path.dentry; /* lock the parent */ - mutex_lock(&dir->d_inode->i_mutex); + mutex_lock(&d_inode(dir)->i_mutex); dentry = lookup_one_len(dname, dir, HEXDIR_LEN-1); if (IS_ERR(dentry)) { status = PTR_ERR(dentry); goto out_unlock; } - if (dentry->d_inode) + if (d_really_is_positive(dentry)) /* * In the 4.1 case, where we're called from * reclaim_complete(), records from the previous reboot @@ -209,11 +209,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) * as well be forgiving and just succeed silently. */ goto out_put; - status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU); + status = vfs_mkdir(d_inode(dir), dentry, S_IRWXU); out_put: dput(dentry); out_unlock: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); if (status == 0) { if (nn->in_grace) { crp = nfs4_client_to_reclaim(dname, nn); @@ -285,7 +285,7 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) } status = iterate_dir(nn->rec_file, &ctx.ctx); - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); while (!list_empty(&ctx.names)) { struct name_list *entry; entry = list_entry(ctx.names.next, struct name_list, list); @@ -302,7 +302,7 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) list_del(&entry->list); kfree(entry); } - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); nfs4_reset_creds(original_cred); return status; } @@ -316,20 +316,20 @@ nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn) dprintk("NFSD: nfsd4_unlink_clid_dir. 
name %.*s\n", namlen, name); dir = nn->rec_file->f_path.dentry; - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); dentry = lookup_one_len(name, dir, namlen); if (IS_ERR(dentry)) { status = PTR_ERR(dentry); goto out_unlock; } status = -ENOENT; - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) goto out; - status = vfs_rmdir(dir->d_inode, dentry); + status = vfs_rmdir(d_inode(dir), dentry); out: dput(dentry); out_unlock: - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); return status; } @@ -385,7 +385,7 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn) if (nfs4_has_reclaimed_state(child->d_name.name, nn)) return 0; - status = vfs_rmdir(parent->d_inode, child); + status = vfs_rmdir(d_inode(parent), child); if (status) printk("failed to remove client recovery directory %pd\n", child); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d2f2c37..38f2d7a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1139,7 +1139,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid) return sid->sequence % SESSION_HASH_SIZE; } -#ifdef NFSD_DEBUG +#ifdef CONFIG_SUNRPC_DEBUG static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { @@ -3221,7 +3221,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, } else nfs4_free_openowner(&oo->oo_owner); spin_unlock(&clp->cl_lock); - return oo; + return ret; } static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { @@ -4049,7 +4049,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf status = nfserr_bad_stateid; if (nfsd4_is_deleg_cur(open)) goto out; - status = nfserr_jukebox; } /* @@ -4118,7 +4117,7 @@ out: } void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, - struct nfsd4_open *open, __be32 status) + struct nfsd4_open *open) { if (open->op_openowner) { struct nfs4_stateowner *so = &open->op_openowner->oo_owner; @@ -4473,7 +4472,7 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *stp = NULL; struct nfs4_delegation *dp = NULL; struct svc_fh *current_fh = &cstate->current_fh; - struct inode *ino = current_fh->fh_dentry->d_inode; + struct inode *ino = d_inode(current_fh->fh_dentry); struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct file *file = NULL; __be32 status; @@ -4932,20 +4931,22 @@ nfs4_transform_lock_offset(struct file_lock *lock) lock->fl_end = OFFSET_MAX; } -static void nfsd4_fl_get_owner(struct file_lock *dst, struct file_lock *src) +static fl_owner_t +nfsd4_fl_get_owner(fl_owner_t owner) { - struct nfs4_lockowner *lo = (struct nfs4_lockowner *)src->fl_owner; - dst->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lo->lo_owner)); + struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; + + nfs4_get_stateowner(&lo->lo_owner); + return owner; } -static void nfsd4_fl_put_owner(struct file_lock *fl) +static void +nfsd4_fl_put_owner(fl_owner_t owner) { - struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; + struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; - if (lo) { + if (lo) nfs4_put_stateowner(&lo->lo_owner); - fl->fl_owner = NULL; - } } static const struct lock_manager_operations nfsd_posix_mng_ops = { @@ -5062,7 +5063,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, } else nfs4_free_lockowner(&lo->lo_owner); 
spin_unlock(&clp->cl_lock); - return lo; + return ret; } static void @@ -5169,7 +5170,7 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_file *fi = ost->st_stid.sc_file; struct nfs4_openowner *oo = openowner(ost->st_stateowner); struct nfs4_client *cl = oo->oo_owner.so_client; - struct inode *inode = cstate->current_fh.fh_dentry->d_inode; + struct inode *inode = d_inode(cstate->current_fh.fh_dentry); struct nfs4_lockowner *lo; unsigned int strhashval; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index df5e66c..158badf 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -424,7 +424,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, len += 4; dummy32 = be32_to_cpup(p++); READ_BUF(dummy32); - if (dummy32 > NFSD4_MAX_SEC_LABEL_LEN) + if (dummy32 > NFS4_MAXLABELLEN) return nfserr_badlabel; len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); @@ -1562,7 +1562,11 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp, p = xdr_decode_hyper(p, &lgp->lg_seg.offset); p = xdr_decode_hyper(p, &lgp->lg_seg.length); p = xdr_decode_hyper(p, &lgp->lg_minlength); - nfsd4_decode_stateid(argp, &lgp->lg_sid); + + status = nfsd4_decode_stateid(argp, &lgp->lg_sid); + if (status) + return status; + READ_BUF(4); lgp->lg_maxcount = be32_to_cpup(p++); @@ -1580,7 +1584,11 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp, p = xdr_decode_hyper(p, &lcp->lc_seg.offset); p = xdr_decode_hyper(p, &lcp->lc_seg.length); lcp->lc_reclaim = be32_to_cpup(p++); - nfsd4_decode_stateid(argp, &lcp->lc_sid); + + status = nfsd4_decode_stateid(argp, &lcp->lc_sid); + if (status) + return status; + READ_BUF(4); lcp->lc_newoffset = be32_to_cpup(p++); if (lcp->lc_newoffset) { @@ -1628,7 +1636,11 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp, READ_BUF(16); p = xdr_decode_hyper(p, &lrp->lr_seg.offset); p = xdr_decode_hyper(p, &lrp->lr_seg.length); - nfsd4_decode_stateid(argp, &lrp->lr_sid); + + status = nfsd4_decode_stateid(argp, &lrp->lr_sid); + if (status) + return status; + READ_BUF(4); lrp->lrf_body_len = be32_to_cpup(p++); if (lrp->lrf_body_len > 0) { @@ -2008,7 +2020,7 @@ static __be32 nfsd4_encode_path(struct xdr_stream *xdr, * dentries/path components in an array. 
*/ for (;;) { - if (cur.dentry == root->dentry && cur.mnt == root->mnt) + if (path_equal(&cur, root)) break; if (cur.dentry == cur.mnt->mnt_root) { if (follow_up(&cur)) @@ -2280,7 +2292,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, #ifdef CONFIG_NFSD_V4_SECURITY_LABEL if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) || bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { - err = security_inode_getsecctx(dentry->d_inode, + err = security_inode_getsecctx(d_inode(dentry), &context, &contextlen); contextsupport = (err == 0); if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) { @@ -2372,7 +2384,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, p = xdr_reserve_space(xdr, 8); if (!p) goto out_resource; - p = encode_change(p, &stat, dentry->d_inode); + p = encode_change(p, &stat, d_inode(dentry)); } if (bmval0 & FATTR4_WORD0_SIZE) { p = xdr_reserve_space(xdr, 8); @@ -2795,7 +2807,7 @@ nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd, dentry = lookup_one_len(name, cd->rd_fhp->fh_dentry, namlen); if (IS_ERR(dentry)) return nfserrno(PTR_ERR(dentry)); - if (!dentry->d_inode) { + if (d_really_is_negative(dentry)) { /* * nfsd_buffered_readdir drops the i_mutex between * readdir and calling this callback, leaving a window @@ -3312,7 +3324,7 @@ static __be32 nfsd4_encode_splice_read( } eof = (read->rd_offset + maxcount >= - read->rd_fhp->fh_dentry->d_inode->i_size); + d_inode(read->rd_fhp->fh_dentry)->i_size); *(p++) = htonl(eof); *(p++) = htonl(maxcount); @@ -3389,7 +3401,7 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3)); eof = (read->rd_offset + maxcount >= - read->rd_fhp->fh_dentry->d_inode->i_size); + d_inode(read->rd_fhp->fh_dentry)->i_size); tmp = htonl(eof); write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4); @@ -3410,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, unsigned long maxcount; struct xdr_stream *xdr = &resp->xdr; struct file *file = read->rd_filp; + struct svc_fh *fhp = read->rd_fhp; int starting_len = xdr->buf->len; struct raparms *ra; __be32 *p; @@ -3433,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len)); maxcount = min_t(unsigned long, maxcount, read->rd_length); - if (!read->rd_filp) { + if (read->rd_filp) + err = nfsd_permission(resp->rqstp, fhp->fh_export, + fhp->fh_dentry, + NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE); + else err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp, &file, &ra); - if (err) - goto err_truncate; - } + if (err) + goto err_truncate; if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) err = nfsd4_encode_splice_read(resp, read, file, maxcount); @@ -4123,7 +4139,7 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr, return nfserr_resource; *p++ = cpu_to_be32(lrp->lrs_present); if (lrp->lrs_present) - nfsd4_encode_stateid(xdr, &lrp->lr_sid); + return nfsd4_encode_stateid(xdr, &lrp->lr_sid); return nfs_ok; } #endif /* CONFIG_NFSD_PNFS */ diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 83a9694..46ec934 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -165,13 +165,17 @@ int nfsd_reply_cache_init(void) { unsigned int hashsize; unsigned int i; + int status = 0; max_drc_entries = nfsd_cache_size_limit(); atomic_set(&num_drc_entries, 0); hashsize = nfsd_hashsize(max_drc_entries); maskbits = ilog2(hashsize); - 
register_shrinker(&nfsd_reply_cache_shrinker); + status = register_shrinker(&nfsd_reply_cache_shrinker); + if (status) + return status; + drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), 0, 0, NULL); if (!drc_slab) diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index aa47d75..9690cb4 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1250,15 +1250,15 @@ static int __init init_nfsd(void) int retval; printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n"); - retval = register_cld_notifier(); - if (retval) - return retval; retval = register_pernet_subsys(&nfsd_net_ops); if (retval < 0) - goto out_unregister_notifier; - retval = nfsd4_init_slabs(); + return retval; + retval = register_cld_notifier(); if (retval) goto out_unregister_pernet; + retval = nfsd4_init_slabs(); + if (retval) + goto out_unregister_notifier; retval = nfsd4_init_pnfs(); if (retval) goto out_free_slabs; @@ -1290,10 +1290,10 @@ out_exit_pnfs: nfsd4_exit_pnfs(); out_free_slabs: nfsd4_free_slabs(); -out_unregister_pernet: - unregister_pernet_subsys(&nfsd_net_ops); out_unregister_notifier: unregister_cld_notifier(); +out_unregister_pernet: + unregister_pernet_subsys(&nfsd_net_ops); return retval; } @@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void) nfsd4_exit_pnfs(); nfsd_fault_inject_cleanup(); unregister_filesystem(&nfsd_fs_type); - unregister_pernet_subsys(&nfsd_net_ops); unregister_cld_notifier(); + unregister_pernet_subsys(&nfsd_net_ops); } MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 565c4da..cf98052 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -24,7 +24,7 @@ #include "export.h" #undef ifdebug -#ifdef NFSD_DEBUG +#ifdef CONFIG_SUNRPC_DEBUG # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag) #else # define ifdebug(flag) if (0) diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index e9fa966..350041a 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -38,7 +38,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry) /* make sure parents give x permission to user */ int err; parent = dget_parent(tdentry); - err = inode_permission(parent->d_inode, MAY_EXEC); + err = inode_permission(d_inode(parent), MAY_EXEC); if (err < 0) { dput(parent); break; @@ -340,7 +340,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access) if (error) goto out; - error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); + error = nfsd_mode_check(rqstp, d_inode(dentry)->i_mode, type); if (error) goto out; @@ -412,8 +412,8 @@ static inline void _fh_update_old(struct dentry *dentry, struct svc_export *exp, struct knfsd_fh *fh) { - fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino); - fh->ofh_generation = dentry->d_inode->i_generation; + fh->ofh_ino = ino_t_to_u32(d_inode(dentry)->i_ino); + fh->ofh_generation = d_inode(dentry)->i_generation; if (d_is_dir(dentry) || (exp->ex_flags & NFSEXP_NOSUBTREECHECK)) fh->ofh_dirino = 0; @@ -426,7 +426,7 @@ static bool is_root_export(struct svc_export *exp) static struct super_block *exp_sb(struct svc_export *exp) { - return exp->ex_path.dentry->d_inode->i_sb; + return d_inode(exp->ex_path.dentry)->i_sb; } static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) @@ -520,12 +520,12 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, * */ - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); dev_t ex_dev = exp_sb(exp)->s_dev; dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %pd2, 
ino=%ld)\n", MAJOR(ex_dev), MINOR(ex_dev), - (long) exp->ex_path.dentry->d_inode->i_ino, + (long) d_inode(exp->ex_path.dentry)->i_ino, dentry, (inode ? inode->i_ino : 0)); @@ -558,7 +558,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, fhp->fh_handle.ofh_dev = old_encode_dev(ex_dev); fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev; fhp->fh_handle.ofh_xino = - ino_t_to_u32(exp->ex_path.dentry->d_inode->i_ino); + ino_t_to_u32(d_inode(exp->ex_path.dentry)->i_ino); fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry)); if (inode) _fh_update_old(dentry, exp, &fhp->fh_handle); @@ -570,7 +570,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, mk_fsid(fhp->fh_handle.fh_fsid_type, fhp->fh_handle.fh_fsid, ex_dev, - exp->ex_path.dentry->d_inode->i_ino, + d_inode(exp->ex_path.dentry)->i_ino, exp->ex_fsid, exp->ex_uuid); if (inode) @@ -597,7 +597,7 @@ fh_update(struct svc_fh *fhp) goto out_bad; dentry = fhp->fh_dentry; - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) goto out_negative; if (fhp->fh_handle.fh_version != 1) { _fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle); diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index f229204..1e90dad 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h @@ -225,7 +225,7 @@ fill_pre_wcc(struct svc_fh *fhp) { struct inode *inode; - inode = fhp->fh_dentry->d_inode; + inode = d_inode(fhp->fh_dentry); if (!fhp->fh_pre_saved) { fhp->fh_pre_mtime = inode->i_mtime; fhp->fh_pre_ctime = inode->i_ctime; @@ -264,7 +264,7 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass) return; } - inode = dentry->d_inode; + inode = d_inode(dentry); mutex_lock_nested(&inode->i_mutex, subclass); fill_pre_wcc(fhp); fhp->fh_locked = 1; @@ -284,7 +284,7 @@ fh_unlock(struct svc_fh *fhp) { if (fhp->fh_locked) { fill_post_wcc(fhp); - mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(fhp->fh_dentry)->i_mutex); fhp->fh_locked = 0; } } diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index b868073..aecbcd3 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -223,7 +223,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, } fh_init(newfhp, NFS_FHSIZE); nfserr = fh_compose(newfhp, dirfhp->fh_export, dchild, dirfhp); - if (!nfserr && !dchild->d_inode) + if (!nfserr && d_really_is_negative(dchild)) nfserr = nfserr_noent; dput(dchild); if (nfserr) { @@ -241,7 +241,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, } } - inode = newfhp->fh_dentry->d_inode; + inode = d_inode(newfhp->fh_dentry); /* Unfudge the mode bits */ if (attr->ia_valid & ATTR_MODE) { diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 412d706..79d964a 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -187,7 +187,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, *p++ = htonl((u32) stat->ino); *p++ = htonl((u32) stat->atime.tv_sec); *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0); - lease_get_mtime(dentry->d_inode, &time); + lease_get_mtime(d_inode(dentry), &time); *p++ = htonl((u32) time.tv_sec); *p++ = htonl(time.tv_nsec ? 
time.tv_nsec / 1000 : 0); *p++ = htonl((u32) stat->ctime.tv_sec); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 3685265..84d770b 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -174,7 +174,7 @@ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) return 1; if (!(exp->ex_flags & NFSEXP_V4ROOT)) return 0; - return dentry->d_inode != NULL; + return d_inode(dentry) != NULL; } __be32 @@ -270,7 +270,7 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, * dentry may be negative, it may need to be updated. */ err = fh_compose(resfh, exp, dentry, fhp); - if (!err && !dentry->d_inode) + if (!err && d_really_is_negative(dentry)) err = nfserr_noent; out: dput(dentry); @@ -284,7 +284,7 @@ out: static int commit_metadata(struct svc_fh *fhp) { - struct inode *inode = fhp->fh_dentry->d_inode; + struct inode *inode = d_inode(fhp->fh_dentry); const struct export_operations *export_ops = inode->i_sb->s_export_op; if (!EX_ISSYNC(fhp->fh_export)) @@ -364,7 +364,7 @@ static __be32 nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap) { - struct inode *inode = fhp->fh_dentry->d_inode; + struct inode *inode = d_inode(fhp->fh_dentry); int host_err; if (iap->ia_size < inode->i_size) { @@ -426,7 +426,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, } dentry = fhp->fh_dentry; - inode = dentry->d_inode; + inode = d_inode(dentry); /* Ignore any mode updates on symlinks */ if (S_ISLNK(inode->i_mode)) @@ -495,7 +495,7 @@ out: */ int nfsd4_is_junction(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (inode == NULL) return 0; @@ -521,9 +521,9 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp, dentry = fhp->fh_dentry; - mutex_lock(&dentry->d_inode->i_mutex); + mutex_lock(&d_inode(dentry)->i_mutex); host_error = security_inode_setsecctx(dentry, label->data, label->len); - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); return nfserrno(host_error); } #else @@ -706,7 +706,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, path.mnt = fhp->fh_export->ex_path.mnt; path.dentry = fhp->fh_dentry; - inode = path.dentry->d_inode; + inode = d_inode(path.dentry); /* Disallow write access to files with the append-only bit set * or any access when mandatory locking enabled @@ -1211,7 +1211,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; dentry = fhp->fh_dentry; - dirp = dentry->d_inode; + dirp = d_inode(dentry); err = nfserr_notdir; if (!dirp->i_op->lookup) @@ -1250,7 +1250,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, * Make sure the child dentry is still negative ... */ err = nfserr_exist; - if (dchild->d_inode) { + if (d_really_is_positive(dchild)) { dprintk("nfsd_create: dentry %pd/%pd not negative!\n", dentry, dchild); goto out; @@ -1353,7 +1353,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; dentry = fhp->fh_dentry; - dirp = dentry->d_inode; + dirp = d_inode(dentry); /* Get all the sanity checks out of the way before * we lock the parent. 
*/ @@ -1376,7 +1376,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out_nfserr; /* If file doesn't exist, check for permissions to create one */ - if (!dchild->d_inode) { + if (d_really_is_negative(dchild)) { err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); if (err) goto out; @@ -1397,7 +1397,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, v_atime = verifier[1]&0x7fffffff; } - if (dchild->d_inode) { + if (d_really_is_positive(dchild)) { err = 0; switch (createmode) { @@ -1420,17 +1420,17 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, } break; case NFS3_CREATE_EXCLUSIVE: - if ( dchild->d_inode->i_mtime.tv_sec == v_mtime - && dchild->d_inode->i_atime.tv_sec == v_atime - && dchild->d_inode->i_size == 0 ) { + if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime + && d_inode(dchild)->i_atime.tv_sec == v_atime + && d_inode(dchild)->i_size == 0 ) { if (created) *created = 1; break; } case NFS4_CREATE_EXCLUSIVE4_1: - if ( dchild->d_inode->i_mtime.tv_sec == v_mtime - && dchild->d_inode->i_atime.tv_sec == v_atime - && dchild->d_inode->i_size == 0 ) { + if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime + && d_inode(dchild)->i_atime.tv_sec == v_atime + && d_inode(dchild)->i_size == 0 ) { if (created) *created = 1; goto set_attr; @@ -1513,7 +1513,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) path.mnt = fhp->fh_export->ex_path.mnt; path.dentry = fhp->fh_dentry; - inode = path.dentry->d_inode; + inode = d_inode(path.dentry); err = nfserr_inval; if (!inode->i_op->readlink) @@ -1576,7 +1576,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, if (IS_ERR(dnew)) goto out_nfserr; - host_err = vfs_symlink(dentry->d_inode, dnew, path); + host_err = vfs_symlink(d_inode(dentry), dnew, path); err = nfserrno(host_err); if (!err) err = nfserrno(commit_metadata(fhp)); @@ -1632,7 +1632,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, fh_lock_nested(ffhp, I_MUTEX_PARENT); ddir = ffhp->fh_dentry; - dirp = ddir->d_inode; + dirp = d_inode(ddir); dnew = lookup_one_len(name, ddir, len); host_err = PTR_ERR(dnew); @@ -1642,7 +1642,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, dold = tfhp->fh_dentry; err = nfserr_noent; - if (!dold->d_inode) + if (d_really_is_negative(dold)) goto out_dput; host_err = vfs_link(dold, dirp, dnew, NULL); if (!host_err) { @@ -1689,10 +1689,10 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, goto out; fdentry = ffhp->fh_dentry; - fdir = fdentry->d_inode; + fdir = d_inode(fdentry); tdentry = tfhp->fh_dentry; - tdir = tdentry->d_inode; + tdir = d_inode(tdentry); err = nfserr_perm; if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) @@ -1717,7 +1717,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, goto out_nfserr; host_err = -ENOENT; - if (!odentry->d_inode) + if (d_really_is_negative(odentry)) goto out_dput_old; host_err = -EINVAL; if (odentry == trap) @@ -1790,21 +1790,21 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, fh_lock_nested(fhp, I_MUTEX_PARENT); dentry = fhp->fh_dentry; - dirp = dentry->d_inode; + dirp = d_inode(dentry); rdentry = lookup_one_len(fname, dentry, flen); host_err = PTR_ERR(rdentry); if (IS_ERR(rdentry)) goto out_nfserr; - if (!rdentry->d_inode) { + if (d_really_is_negative(rdentry)) { dput(rdentry); err = nfserr_noent; goto out; } if (!type) - type = rdentry->d_inode->i_mode & S_IFMT; + type = d_inode(rdentry)->i_mode & S_IFMT; if (type != S_IFDIR) 
host_err = vfs_unlink(dirp, rdentry, NULL); @@ -2015,7 +2015,7 @@ __be32 nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, struct dentry *dentry, int acc) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int err; if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP) diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 0bda93e..f982ae8 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -40,7 +40,6 @@ #include "state.h" #include "nfsd.h" -#define NFSD4_MAX_SEC_LABEL_LEN 2048 #define NFSD4_MAX_TAGLEN 128 #define XDR_LEN(n) (((n) + 3) & ~3) @@ -632,7 +631,7 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) { BUG_ON(!fhp->fh_pre_saved); cinfo->atomic = fhp->fh_post_saved; - cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); + cinfo->change_supported = IS_I_VERSION(d_inode(fhp->fh_dentry)); cinfo->before_change = fhp->fh_pre_change; cinfo->after_change = fhp->fh_post_change; @@ -683,7 +682,7 @@ extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open); extern void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate); extern void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, - struct nfsd4_open *open, __be32 status); + struct nfsd4_open *open); extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc); extern __be32 nfsd4_close(struct svc_rqst *rqstp, diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 741fd02..8df0f3b 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -405,13 +405,14 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, static int nilfs_palloc_count_desc_blocks(struct inode *inode, unsigned long *desc_blocks) { - unsigned long blknum; + __u64 blknum; int ret; ret = nilfs_bmap_last_key(NILFS_I(inode)->i_bmap, &blknum); if (likely(!ret)) *desc_blocks = DIV_ROUND_UP( - blknum, NILFS_MDT(inode)->mi_blocks_per_desc_block); + (unsigned long)blknum, + NILFS_MDT(inode)->mi_blocks_per_desc_block); return ret; } diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index aadbd0b..27f75bc 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -152,9 +152,7 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) * * %-EEXIST - A record associated with @key already exist. */ -int nilfs_bmap_insert(struct nilfs_bmap *bmap, - unsigned long key, - unsigned long rec) +int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec) { int ret; @@ -191,19 +189,47 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) return bmap->b_ops->bop_delete(bmap, key); } -int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key) +/** + * nilfs_bmap_seek_key - seek a valid entry and return its key + * @bmap: bmap struct + * @start: start key number + * @keyp: place to store valid key + * + * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap + * starting from @start, and stores it to @keyp if found. + * + * Return Value: On success, 0 is returned. On error, one of the following + * negative error codes is returned. + * + * %-EIO - I/O error. + * + * %-ENOMEM - Insufficient amount of memory available. 
+ * + * %-ENOENT - No valid entry was found + */ +int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp) { - __u64 lastkey; int ret; down_read(&bmap->b_sem); - ret = bmap->b_ops->bop_last_key(bmap, &lastkey); + ret = bmap->b_ops->bop_seek_key(bmap, start, keyp); + up_read(&bmap->b_sem); + + if (ret < 0) + ret = nilfs_bmap_convert_error(bmap, __func__, ret); + return ret; +} + +int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp) +{ + int ret; + + down_read(&bmap->b_sem); + ret = bmap->b_ops->bop_last_key(bmap, keyp); up_read(&bmap->b_sem); if (ret < 0) ret = nilfs_bmap_convert_error(bmap, __func__, ret); - else - *key = lastkey; return ret; } @@ -224,7 +250,7 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key) * * %-ENOENT - A record associated with @key does not exist. */ -int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key) +int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key) { int ret; @@ -235,7 +261,7 @@ int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key) return nilfs_bmap_convert_error(bmap, __func__, ret); } -static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key) +static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key) { __u64 lastkey; int ret; @@ -276,7 +302,7 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key) * * %-ENOMEM - Insufficient amount of memory available. */ -int nilfs_bmap_truncate(struct nilfs_bmap *bmap, unsigned long key) +int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key) { int ret; diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index b89e680..bfa817c 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h @@ -76,8 +76,10 @@ struct nilfs_bmap_operations { union nilfs_binfo *); int (*bop_mark)(struct nilfs_bmap *, __u64, int); - /* The following functions are internal use only. */ + int (*bop_seek_key)(const struct nilfs_bmap *, __u64, __u64 *); int (*bop_last_key)(const struct nilfs_bmap *, __u64 *); + + /* The following functions are internal use only. 
*/ int (*bop_check_insert)(const struct nilfs_bmap *, __u64); int (*bop_check_delete)(struct nilfs_bmap *, __u64); int (*bop_gather_data)(struct nilfs_bmap *, __u64 *, __u64 *, int); @@ -153,10 +155,11 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *); int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *); void nilfs_bmap_write(struct nilfs_bmap *, struct nilfs_inode *); int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned); -int nilfs_bmap_insert(struct nilfs_bmap *, unsigned long, unsigned long); -int nilfs_bmap_delete(struct nilfs_bmap *, unsigned long); -int nilfs_bmap_last_key(struct nilfs_bmap *, unsigned long *); -int nilfs_bmap_truncate(struct nilfs_bmap *, unsigned long); +int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec); +int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key); +int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp); +int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp); +int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key); void nilfs_bmap_clear(struct nilfs_bmap *); int nilfs_bmap_propagate(struct nilfs_bmap *, struct buffer_head *); void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *, struct list_head *); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index ecdbae1..059f371 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -633,6 +633,44 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, return 0; } +/** + * nilfs_btree_get_next_key - get next valid key from btree path array + * @btree: bmap struct of btree + * @path: array of nilfs_btree_path struct + * @minlevel: start level + * @nextkey: place to store the next valid key + * + * Return Value: If a next key was found, 0 is returned. Otherwise, + * -ENOENT is returned. + */ +static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree, + const struct nilfs_btree_path *path, + int minlevel, __u64 *nextkey) +{ + struct nilfs_btree_node *node; + int maxlevel = nilfs_btree_height(btree) - 1; + int index, next_adj, level; + + /* Next index is already set to bp_index for leaf nodes. */ + next_adj = 0; + for (level = minlevel; level <= maxlevel; level++) { + if (level == maxlevel) + node = nilfs_btree_get_root(btree); + else + node = nilfs_btree_get_nonroot_node(path, level); + + index = path[level].bp_index + next_adj; + if (index < nilfs_btree_node_get_nchildren(node)) { + /* Next key is in this node */ + *nextkey = nilfs_btree_node_get_key(node, index); + return 0; + } + /* For non-leaf nodes, next index is stored at bp_index + 1. 
*/ + next_adj = 1; + } + return -ENOENT; +} + static int nilfs_btree_lookup(const struct nilfs_bmap *btree, __u64 key, int level, __u64 *ptrp) { @@ -1563,6 +1601,27 @@ out: return ret; } +static int nilfs_btree_seek_key(const struct nilfs_bmap *btree, __u64 start, + __u64 *keyp) +{ + struct nilfs_btree_path *path; + const int minlevel = NILFS_BTREE_LEVEL_NODE_MIN; + int ret; + + path = nilfs_btree_alloc_path(); + if (!path) + return -ENOMEM; + + ret = nilfs_btree_do_lookup(btree, path, start, NULL, minlevel, 0); + if (!ret) + *keyp = start; + else if (ret == -ENOENT) + ret = nilfs_btree_get_next_key(btree, path, minlevel, keyp); + + nilfs_btree_free_path(path); + return ret; +} + static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp) { struct nilfs_btree_path *path; @@ -2298,7 +2357,9 @@ static const struct nilfs_bmap_operations nilfs_btree_ops = { .bop_assign = nilfs_btree_assign, .bop_mark = nilfs_btree_mark, + .bop_seek_key = nilfs_btree_seek_key, .bop_last_key = nilfs_btree_last_key, + .bop_check_insert = NULL, .bop_check_delete = nilfs_btree_check_delete, .bop_gather_data = nilfs_btree_gather_data, @@ -2318,7 +2379,9 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { .bop_assign = nilfs_btree_assign_gc, .bop_mark = NULL, + .bop_seek_key = NULL, .bop_last_key = NULL, + .bop_check_insert = NULL, .bop_check_delete = NULL, .bop_gather_data = NULL, diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index 0d58075..b6596ca 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -53,6 +53,13 @@ nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno) return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); } +static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile, + unsigned long blkoff) +{ + return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff + + 1 - NILFS_MDT(cpfile)->mi_first_entry_offset; +} + static unsigned long nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile, __u64 curr, @@ -146,6 +153,44 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, create, nilfs_cpfile_block_init, bhp); } +/** + * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile + * @cpfile: inode of cpfile + * @start_cno: start checkpoint number (inclusive) + * @end_cno: end checkpoint number (inclusive) + * @cnop: place to store the next checkpoint number + * @bhp: place to store a pointer to buffer_head struct + * + * Return Value: On success, it returns 0. On error, the following negative + * error code is returned. + * + * %-ENOMEM - Insufficient memory available. + * + * %-EIO - I/O error + * + * %-ENOENT - no block exists in the range. + */ +static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile, + __u64 start_cno, __u64 end_cno, + __u64 *cnop, + struct buffer_head **bhp) +{ + unsigned long start, end, blkoff; + int ret; + + if (unlikely(start_cno > end_cno)) + return -ENOENT; + + start = nilfs_cpfile_get_blkoff(cpfile, start_cno); + end = nilfs_cpfile_get_blkoff(cpfile, end_cno); + + ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp); + if (!ret) + *cnop = (blkoff == start) ? 
start_cno : + nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff); + return ret; +} + static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, __u64 cno) { @@ -403,14 +448,15 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, return -ENOENT; /* checkpoint number 0 is invalid */ down_read(&NILFS_MDT(cpfile)->mi_sem); - for (n = 0; cno < cur_cno && n < nci; cno += ncps) { - ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno); - ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); + for (n = 0; n < nci; cno += ncps) { + ret = nilfs_cpfile_find_checkpoint_block( + cpfile, cno, cur_cno - 1, &cno, &bh); if (ret < 0) { - if (ret != -ENOENT) - goto out; - continue; /* skip hole */ + if (likely(ret == -ENOENT)) + break; + goto out; } + ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno); kaddr = kmap_atomic(bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 197a63e..0ee0bed 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -435,7 +435,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, */ int nilfs_add_link(struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned chunk_size = nilfs_chunk_size(dir); diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 82f4865..ebf89fd 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -173,6 +173,21 @@ static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key) return ret; } +static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start, + __u64 *keyp) +{ + __u64 key; + + for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) { + if (nilfs_direct_get_ptr(direct, key) != + NILFS_BMAP_INVALID_PTR) { + *keyp = key; + return 0; + } + } + return -ENOENT; +} + static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp) { __u64 key, lastkey; @@ -355,7 +370,9 @@ static const struct nilfs_bmap_operations nilfs_direct_ops = { .bop_assign = nilfs_direct_assign, .bop_mark = NULL, + .bop_seek_key = nilfs_direct_seek_key, .bop_last_key = nilfs_direct_last_key, + .bop_check_insert = nilfs_direct_check_insert, .bop_check_delete = NULL, .bop_gather_data = nilfs_direct_gather_data, diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index a8c728ac..54575e3 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -143,8 +143,6 @@ static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma) */ const struct file_operations nilfs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = nilfs_ioctl, diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 8b59695..258d9fe 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -26,7 +26,7 @@ #include <linux/mpage.h> #include <linux/pagemap.h> #include <linux/writeback.h> -#include <linux/aio.h> +#include <linux/uio.h> #include "nilfs.h" #include "btnode.h" #include "segment.h" @@ -106,7 +106,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, err = nilfs_transaction_begin(inode->i_sb, &ti, 1); if (unlikely(err)) goto out; - err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff, + err = nilfs_bmap_insert(ii->i_bmap, blkoff, (unsigned long)bh_result); if 
(unlikely(err != 0)) { if (err == -EEXIST) { @@ -305,8 +305,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, } static ssize_t -nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -314,18 +313,17 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, size_t count = iov_iter_count(iter); ssize_t size; - if (rw == WRITE) + if (iov_iter_rw(iter) == WRITE) return 0; /* Needs synchronization with the cleaner */ - size = blockdev_direct_IO(rw, iocb, inode, iter, offset, - nilfs_get_block); + size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. */ - if (unlikely((rw & WRITE) && size < 0)) { + if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) { loff_t isize = i_size_read(inode); loff_t end = offset + count; @@ -443,21 +441,20 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) void nilfs_set_inode_flags(struct inode *inode) { unsigned int flags = NILFS_I(inode)->i_flags; + unsigned int new_fl = 0; - inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | - S_DIRSYNC); if (flags & FS_SYNC_FL) - inode->i_flags |= S_SYNC; + new_fl |= S_SYNC; if (flags & FS_APPEND_FL) - inode->i_flags |= S_APPEND; + new_fl |= S_APPEND; if (flags & FS_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; + new_fl |= S_IMMUTABLE; if (flags & FS_NOATIME_FL) - inode->i_flags |= S_NOATIME; + new_fl |= S_NOATIME; if (flags & FS_DIRSYNC_FL) - inode->i_flags |= S_DIRSYNC; - mapping_set_gfp_mask(inode->i_mapping, - mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); + new_fl |= S_DIRSYNC; + inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE | + S_NOATIME | S_DIRSYNC); } int nilfs_read_inode_common(struct inode *inode, @@ -542,6 +539,8 @@ static int __nilfs_read_inode(struct super_block *sb, brelse(bh); up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); nilfs_set_inode_flags(inode); + mapping_set_gfp_mask(inode->i_mapping, + mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); return 0; failed_unmap: @@ -714,7 +713,7 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags) static void nilfs_truncate_bmap(struct nilfs_inode_info *ii, unsigned long from) { - unsigned long b; + __u64 b; int ret; if (!test_bit(NILFS_I_BMAP, &ii->i_state)) @@ -729,7 +728,7 @@ repeat: if (b < from) return; - b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from); + b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from); ret = nilfs_bmap_truncate(ii->i_bmap, b); nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb); if (!ret || (ret == -ENOMEM && @@ -836,7 +835,7 @@ void nilfs_evict_inode(struct inode *inode) int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) { struct nilfs_transaction_info ti; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; int err; diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 892cf5f..dee34d9 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -261,6 +261,60 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, } /** + * nilfs_mdt_find_block - find and get a buffer on meta data file. 
+ * @inode: inode of the meta data file + * @start: start block offset (inclusive) + * @end: end block offset (inclusive) + * @blkoff: block offset + * @out_bh: place to store a pointer to buffer_head struct + * + * nilfs_mdt_find_block() looks up an existing block in range of + * [@start, @end] and stores pointer to a buffer head of the block to + * @out_bh, and block offset to @blkoff, respectively. @out_bh and + * @blkoff are substituted only when zero is returned. + * + * Return Value: On success, it returns 0. On error, the following negative + * error code is returned. + * + * %-ENOMEM - Insufficient memory available. + * + * %-EIO - I/O error + * + * %-ENOENT - no block was found in the range + */ +int nilfs_mdt_find_block(struct inode *inode, unsigned long start, + unsigned long end, unsigned long *blkoff, + struct buffer_head **out_bh) +{ + __u64 next; + int ret; + + if (unlikely(start > end)) + return -ENOENT; + + ret = nilfs_mdt_read_block(inode, start, true, out_bh); + if (!ret) { + *blkoff = start; + goto out; + } + if (unlikely(ret != -ENOENT || start == ULONG_MAX)) + goto out; + + ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next); + if (!ret) { + if (next <= end) { + ret = nilfs_mdt_read_block(inode, next, true, out_bh); + if (!ret) + *blkoff = next; + } else { + ret = -ENOENT; + } + } +out: + return ret; +} + +/** * nilfs_mdt_delete_block - make a hole on the meta data file. * @inode: inode of the meta data file * @block: block offset diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h index ab172e8..fe529a8 100644 --- a/fs/nilfs2/mdt.h +++ b/fs/nilfs2/mdt.h @@ -78,6 +78,9 @@ int nilfs_mdt_get_block(struct inode *, unsigned long, int, void (*init_block)(struct inode *, struct buffer_head *, void *), struct buffer_head **); +int nilfs_mdt_find_block(struct inode *inode, unsigned long start, + unsigned long end, unsigned long *blkoff, + struct buffer_head **out_bh); int nilfs_mdt_delete_block(struct inode *, unsigned long); int nilfs_mdt_forget_block(struct inode *, unsigned long); int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long); @@ -111,7 +114,10 @@ static inline __u64 nilfs_mdt_cno(struct inode *inode) return ((struct the_nilfs *)inode->i_sb->s_fs_info)->ns_cno; } -#define nilfs_mdt_bgl_lock(inode, bg) \ - (&NILFS_MDT(inode)->mi_bgl->locks[(bg) & (NR_BG_LOCKS-1)].lock) +static inline spinlock_t * +nilfs_mdt_bgl_lock(struct inode *inode, unsigned int block_group) +{ + return bgl_lock_ptr(NILFS_MDT(inode)->mi_bgl, block_group); +} #endif /* _NILFS_MDT_H */ diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 0f84b25..2218083 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -192,7 +192,7 @@ out_fail: static int nilfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct nilfs_transaction_info ti; int err; @@ -283,7 +283,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry) if (!de) goto out; - inode = dentry->d_inode; + inode = d_inode(dentry); err = -EIO; if (le64_to_cpu(de->inode) != inode->i_ino) goto out; @@ -318,7 +318,7 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry) if (!err) { nilfs_mark_inode_dirty(dir); - nilfs_mark_inode_dirty(dentry->d_inode); + nilfs_mark_inode_dirty(d_inode(dentry)); err = nilfs_transaction_commit(dir->i_sb); } else nilfs_transaction_abort(dir->i_sb); @@ -328,7 +328,7 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry) static int 
nilfs_rmdir(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct nilfs_transaction_info ti; int err; @@ -358,8 +358,8 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry) static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct page *dir_page = NULL; struct nilfs_dir_entry *dir_de = NULL; struct page *old_page; @@ -453,13 +453,13 @@ static struct dentry *nilfs_get_parent(struct dentry *child) struct qstr dotdot = QSTR_INIT("..", 2); struct nilfs_root *root; - ino = nilfs_inode_by_name(child->d_inode, &dotdot); + ino = nilfs_inode_by_name(d_inode(child), &dotdot); if (!ino) return ERR_PTR(-ENOENT); - root = NILFS_I(child->d_inode)->i_root; + root = NILFS_I(d_inode(child))->i_root; - inode = nilfs_iget(child->d_inode->i_sb, root, ino); + inode = nilfs_iget(d_inode(child)->i_sb, root, ino); if (IS_ERR(inode)) return ERR_CAST(inode); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 700ecbc..45d650a 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -89,18 +89,16 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode, void nilfs_forget_buffer(struct buffer_head *bh) { struct page *page = bh->b_page; + const unsigned long clear_bits = + (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped | + 1 << BH_Async_Write | 1 << BH_NILFS_Volatile | + 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected); lock_buffer(bh); - clear_buffer_nilfs_volatile(bh); - clear_buffer_nilfs_checked(bh); - clear_buffer_nilfs_redirected(bh); - clear_buffer_async_write(bh); - clear_buffer_dirty(bh); + set_mask_bits(&bh->b_state, clear_bits, 0); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); - clear_buffer_uptodate(bh); - clear_buffer_mapped(bh); bh->b_blocknr = -1; ClearPageUptodate(page); ClearPageMappedToDisk(page); @@ -421,6 +419,10 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) if (page_has_buffers(page)) { struct buffer_head *bh, *head; + const unsigned long clear_bits = + (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped | + 1 << BH_Async_Write | 1 << BH_NILFS_Volatile | + 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected); bh = head = page_buffers(page); do { @@ -430,13 +432,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) "discard block %llu, size %zu", (u64)bh->b_blocknr, bh->b_size); } - clear_buffer_async_write(bh); - clear_buffer_dirty(bh); - clear_buffer_nilfs_volatile(bh); - clear_buffer_nilfs_checked(bh); - clear_buffer_nilfs_redirected(bh); - clear_buffer_uptodate(bh); - clear_buffer_mapped(bh); + set_mask_bits(&bh->b_state, clear_bits, 0); unlock_buffer(bh); } while (bh = bh->b_this_page, bh != head); } diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 0c3f303..c6abbad9 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -24,6 +24,7 @@ #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> +#include <linux/bitops.h> #include <linux/bio.h> #include <linux/completion.h> #include <linux/blkdev.h> @@ -1588,7 +1589,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { - set_buffer_async_write(bh); if (bh->b_page != bd_page) { if (bd_page) { 
lock_page(bd_page); @@ -1688,7 +1688,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err) list_for_each_entry(segbuf, logs, sb_list) { list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { - clear_buffer_async_write(bh); if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); @@ -1768,7 +1767,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) b_assoc_buffers) { set_buffer_uptodate(bh); clear_buffer_dirty(bh); - clear_buffer_async_write(bh); if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); @@ -1788,12 +1786,13 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) */ list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { - set_buffer_uptodate(bh); - clear_buffer_dirty(bh); - clear_buffer_async_write(bh); - clear_buffer_delay(bh); - clear_buffer_nilfs_volatile(bh); - clear_buffer_nilfs_redirected(bh); + const unsigned long set_bits = (1 << BH_Uptodate); + const unsigned long clear_bits = + (1 << BH_Dirty | 1 << BH_Async_Write | + 1 << BH_Delay | 1 << BH_NILFS_Volatile | + 1 << BH_NILFS_Redirected); + + set_mask_bits(&bh->b_state, clear_bits, set_bits); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { end_page_writeback(bd_page); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 5bc2a1c..f47585b 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -610,7 +610,7 @@ static int nilfs_unfreeze(struct super_block *sb) static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; - struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root; + struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root; struct the_nilfs *nilfs = root->nilfs; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); unsigned long long blocks; @@ -681,7 +681,7 @@ static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct super_block *sb = dentry->d_sb; struct the_nilfs *nilfs = sb->s_fs_info; - struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root; + struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root; if (!nilfs_test_opt(nilfs, BARRIER)) seq_puts(seq, ",nobarrier"); @@ -1020,7 +1020,7 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno) struct dentry *dentry; int ret; - if (cno < 0 || cno > nilfs->ns_cno) + if (cno > nilfs->ns_cno) return false; if (cno >= nilfs_last_cno(nilfs)) @@ -1190,7 +1190,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) sb->s_flags &= ~MS_RDONLY; - root = NILFS_I(sb->s_root->d_inode)->i_root; + root = NILFS_I(d_inode(sb->s_root))->i_root; err = nilfs_attach_log_writer(sb, root); if (err) goto restore_opts; @@ -13,7 +13,7 @@ static const struct file_operations ns_file_operations = { static char *ns_dname(struct dentry *dentry, char *buffer, int buflen) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); const struct proc_ns_operations *ns_ops = dentry->d_fsdata; return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]", @@ -22,7 +22,7 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen) static void ns_prune_dentry(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (inode) { struct ns_common *ns = inode->i_private; atomic_long_set(&ns->stashed, 0); diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile index 36ae529..2ff263e 100644 --- a/fs/ntfs/Makefile +++ b/fs/ntfs/Makefile @@ -8,7 +8,7 @@ ntfs-y := aops.o attrib.o collate.o 
compress.o debug.o dir.o file.o \ ntfs-$(CONFIG_NTFS_RW) += bitmap.o lcnalloc.o logfile.o quota.o usnjrnl.o -ccflags-y := -DNTFS_VERSION=\"2.1.31\" +ccflags-y := -DNTFS_VERSION=\"2.1.32\" ccflags-$(CONFIG_NTFS_DEBUG) += -DDEBUG ccflags-$(CONFIG_NTFS_RW) += -DNTFS_RW diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 1da9b2d..7bb487e 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -1,7 +1,7 @@ /* * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc. + * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc. * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -28,7 +28,6 @@ #include <linux/swap.h> #include <linux/uio.h> #include <linux/writeback.h> -#include <linux/aio.h> #include <asm/page.h> #include <asm/uaccess.h> @@ -329,62 +328,166 @@ err_out: return err; } -/** - * ntfs_fault_in_pages_readable - - * - * Fault a number of userspace pages into pagetables. - * - * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes - * with more than two userspace pages as well as handling the single page case - * elegantly. - * - * If you find this difficult to understand, then think of the while loop being - * the following code, except that we do without the integer variable ret: - * - * do { - * ret = __get_user(c, uaddr); - * uaddr += PAGE_SIZE; - * } while (!ret && uaddr < end); - * - * Note, the final __get_user() may well run out-of-bounds of the user buffer, - * but _not_ out-of-bounds of the page the user buffer belongs to, and since - * this is only a read and not a write, and since it is still in the same page, - * it should not matter and this makes the code much simpler. - */ -static inline void ntfs_fault_in_pages_readable(const char __user *uaddr, - int bytes) -{ - const char __user *end; - volatile char c; - - /* Set @end to the first byte outside the last page we care about. */ - end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes); - - while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end)) - ; -} - -/** - * ntfs_fault_in_pages_readable_iovec - - * - * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs. - */ -static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov, - size_t iov_ofs, int bytes) +static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb, + struct iov_iter *from) { - do { - const char __user *buf; - unsigned len; + loff_t pos; + s64 end, ll; + ssize_t err; + unsigned long flags; + struct file *file = iocb->ki_filp; + struct inode *vi = file_inode(file); + ntfs_inode *base_ni, *ni = NTFS_I(vi); + ntfs_volume *vol = ni->vol; - buf = iov->iov_base + iov_ofs; - len = iov->iov_len - iov_ofs; - if (len > bytes) - len = bytes; - ntfs_fault_in_pages_readable(buf, len); - bytes -= len; - iov++; - iov_ofs = 0; - } while (bytes); + ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos " + "0x%llx, count 0x%zx.", vi->i_ino, + (unsigned)le32_to_cpu(ni->type), + (unsigned long long)iocb->ki_pos, + iov_iter_count(from)); + err = generic_write_checks(iocb, from); + if (unlikely(err <= 0)) + goto out; + /* + * All checks have passed. Before we start doing any writing we want + * to abort any totally illegal writes. + */ + BUG_ON(NInoMstProtected(ni)); + BUG_ON(ni->type != AT_DATA); + /* If file is encrypted, deny access, just like NT4. 
*/ + if (NInoEncrypted(ni)) { + /* Only $DATA attributes can be encrypted. */ + /* + * Reminder for later: Encrypted files are _always_ + * non-resident so that the content can always be encrypted. + */ + ntfs_debug("Denying write access to encrypted file."); + err = -EACCES; + goto out; + } + if (NInoCompressed(ni)) { + /* Only unnamed $DATA attribute can be compressed. */ + BUG_ON(ni->name_len); + /* + * Reminder for later: If resident, the data is not actually + * compressed. Only on the switch to non-resident does + * compression kick in. This is in contrast to encrypted files + * (see above). + */ + ntfs_error(vi->i_sb, "Writing to compressed files is not " + "implemented yet. Sorry."); + err = -EOPNOTSUPP; + goto out; + } + base_ni = ni; + if (NInoAttr(ni)) + base_ni = ni->ext.base_ntfs_ino; + err = file_remove_suid(file); + if (unlikely(err)) + goto out; + /* + * Our ->update_time method always succeeds thus file_update_time() + * cannot fail either so there is no need to check the return code. + */ + file_update_time(file); + pos = iocb->ki_pos; + /* The first byte after the last cluster being written to. */ + end = (pos + iov_iter_count(from) + vol->cluster_size_mask) & + ~(u64)vol->cluster_size_mask; + /* + * If the write goes beyond the allocated size, extend the allocation + * to cover the whole of the write, rounded up to the nearest cluster. + */ + read_lock_irqsave(&ni->size_lock, flags); + ll = ni->allocated_size; + read_unlock_irqrestore(&ni->size_lock, flags); + if (end > ll) { + /* + * Extend the allocation without changing the data size. + * + * Note we ensure the allocation is big enough to at least + * write some data but we do not require the allocation to be + * complete, i.e. it may be partial. + */ + ll = ntfs_attr_extend_allocation(ni, end, -1, pos); + if (likely(ll >= 0)) { + BUG_ON(pos >= ll); + /* If the extension was partial truncate the write. */ + if (end > ll) { + ntfs_debug("Truncating write to inode 0x%lx, " + "attribute type 0x%x, because " + "the allocation was only " + "partially extended.", + vi->i_ino, (unsigned) + le32_to_cpu(ni->type)); + iov_iter_truncate(from, ll - pos); + } + } else { + err = ll; + read_lock_irqsave(&ni->size_lock, flags); + ll = ni->allocated_size; + read_unlock_irqrestore(&ni->size_lock, flags); + /* Perform a partial write if possible or fail. */ + if (pos < ll) { + ntfs_debug("Truncating write to inode 0x%lx " + "attribute type 0x%x, because " + "extending the allocation " + "failed (error %d).", + vi->i_ino, (unsigned) + le32_to_cpu(ni->type), + (int)-err); + iov_iter_truncate(from, ll - pos); + } else { + if (err != -ENOSPC) + ntfs_error(vi->i_sb, "Cannot perform " + "write to inode " + "0x%lx, attribute " + "type 0x%x, because " + "extending the " + "allocation failed " + "(error %ld).", + vi->i_ino, (unsigned) + le32_to_cpu(ni->type), + (long)-err); + else + ntfs_debug("Cannot perform write to " + "inode 0x%lx, " + "attribute type 0x%x, " + "because there is not " + "space left.", + vi->i_ino, (unsigned) + le32_to_cpu(ni->type)); + goto out; + } + } + } + /* + * If the write starts beyond the initialized size, extend it up to the + * beginning of the write and initialize all non-sparse space between + * the old initialized size and the new one. This automatically also + * increments the vfs inode->i_size to keep it above or equal to the + * initialized_size. 
+ */ + read_lock_irqsave(&ni->size_lock, flags); + ll = ni->initialized_size; + read_unlock_irqrestore(&ni->size_lock, flags); + if (pos > ll) { + /* + * Wait for ongoing direct i/o to complete before proceeding. + * New direct i/o cannot start as we hold i_mutex. + */ + inode_dio_wait(vi); + err = ntfs_attr_extend_initialized(ni, pos); + if (unlikely(err < 0)) + ntfs_error(vi->i_sb, "Cannot perform write to inode " + "0x%lx, attribute type 0x%x, because " + "extending the initialized size " + "failed (error %d).", vi->i_ino, + (unsigned)le32_to_cpu(ni->type), + (int)-err); + } +out: + return err; } /** @@ -421,8 +524,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping, goto err_out; } } - err = add_to_page_cache_lru(*cached_page, mapping, index, - GFP_KERNEL); + err = add_to_page_cache_lru(*cached_page, mapping, + index, GFP_KERNEL); if (unlikely(err)) { if (err == -EEXIST) continue; @@ -1268,180 +1371,6 @@ rl_not_mapped_enoent: return err; } -/* - * Copy as much as we can into the pages and return the number of bytes which - * were successfully copied. If a fault is encountered then clear the pages - * out to (ofs + bytes) and return the number of bytes which were copied. - */ -static inline size_t ntfs_copy_from_user(struct page **pages, - unsigned nr_pages, unsigned ofs, const char __user *buf, - size_t bytes) -{ - struct page **last_page = pages + nr_pages; - char *addr; - size_t total = 0; - unsigned len; - int left; - - do { - len = PAGE_CACHE_SIZE - ofs; - if (len > bytes) - len = bytes; - addr = kmap_atomic(*pages); - left = __copy_from_user_inatomic(addr + ofs, buf, len); - kunmap_atomic(addr); - if (unlikely(left)) { - /* Do it the slow way. */ - addr = kmap(*pages); - left = __copy_from_user(addr + ofs, buf, len); - kunmap(*pages); - if (unlikely(left)) - goto err_out; - } - total += len; - bytes -= len; - if (!bytes) - break; - buf += len; - ofs = 0; - } while (++pages < last_page); -out: - return total; -err_out: - total += len - left; - /* Zero the rest of the target like __copy_from_user(). */ - while (++pages < last_page) { - bytes -= len; - if (!bytes) - break; - len = PAGE_CACHE_SIZE; - if (len > bytes) - len = bytes; - zero_user(*pages, 0, len); - } - goto out; -} - -static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr, - const struct iovec *iov, size_t iov_ofs, size_t bytes) -{ - size_t total = 0; - - while (1) { - const char __user *buf = iov->iov_base + iov_ofs; - unsigned len; - size_t left; - - len = iov->iov_len - iov_ofs; - if (len > bytes) - len = bytes; - left = __copy_from_user_inatomic(vaddr, buf, len); - total += len; - bytes -= len; - vaddr += len; - if (unlikely(left)) { - total -= left; - break; - } - if (!bytes) - break; - iov++; - iov_ofs = 0; - } - return total; -} - -static inline void ntfs_set_next_iovec(const struct iovec **iovp, - size_t *iov_ofsp, size_t bytes) -{ - const struct iovec *iov = *iovp; - size_t iov_ofs = *iov_ofsp; - - while (bytes) { - unsigned len; - - len = iov->iov_len - iov_ofs; - if (len > bytes) - len = bytes; - bytes -= len; - iov_ofs += len; - if (iov->iov_len == iov_ofs) { - iov++; - iov_ofs = 0; - } - } - *iovp = iov; - *iov_ofsp = iov_ofs; -} - -/* - * This has the same side-effects and return value as ntfs_copy_from_user(). - * The difference is that on a fault we need to memset the remainder of the - * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s - * single-segment behaviour. 
- * - * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when - * atomic and when not atomic. This is ok because it calls - * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In - * fact, the only difference between __copy_from_user_inatomic() and - * __copy_from_user() is that the latter calls might_sleep() and the former - * should not zero the tail of the buffer on error. And on many architectures - * __copy_from_user_inatomic() is just defined to __copy_from_user() so it - * makes no difference at all on those architectures. - */ -static inline size_t ntfs_copy_from_user_iovec(struct page **pages, - unsigned nr_pages, unsigned ofs, const struct iovec **iov, - size_t *iov_ofs, size_t bytes) -{ - struct page **last_page = pages + nr_pages; - char *addr; - size_t copied, len, total = 0; - - do { - len = PAGE_CACHE_SIZE - ofs; - if (len > bytes) - len = bytes; - addr = kmap_atomic(*pages); - copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, - *iov, *iov_ofs, len); - kunmap_atomic(addr); - if (unlikely(copied != len)) { - /* Do it the slow way. */ - addr = kmap(*pages); - copied = __ntfs_copy_from_user_iovec_inatomic(addr + - ofs, *iov, *iov_ofs, len); - if (unlikely(copied != len)) - goto err_out; - kunmap(*pages); - } - total += len; - ntfs_set_next_iovec(iov, iov_ofs, len); - bytes -= len; - if (!bytes) - break; - ofs = 0; - } while (++pages < last_page); -out: - return total; -err_out: - BUG_ON(copied > len); - /* Zero the rest of the target like __copy_from_user(). */ - memset(addr + ofs + copied, 0, len - copied); - kunmap(*pages); - total += copied; - ntfs_set_next_iovec(iov, iov_ofs, copied); - while (++pages < last_page) { - bytes -= len; - if (!bytes) - break; - len = PAGE_CACHE_SIZE; - if (len > bytes) - len = bytes; - zero_user(*pages, 0, len); - } - goto out; -} - static inline void ntfs_flush_dcache_pages(struct page **pages, unsigned nr_pages) { @@ -1762,86 +1691,83 @@ err_out: return err; } -static void ntfs_write_failed(struct address_space *mapping, loff_t to) +/* + * Copy as much as we can into the pages and return the number of bytes which + * were successfully copied. If a fault is encountered then clear the pages + * out to (ofs + bytes) and return the number of bytes which were copied. + */ +static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages, + unsigned ofs, struct iov_iter *i, size_t bytes) { - struct inode *inode = mapping->host; + struct page **last_page = pages + nr_pages; + size_t total = 0; + struct iov_iter data = *i; + unsigned len, copied; - if (to > inode->i_size) { - truncate_pagecache(inode, inode->i_size); - ntfs_truncate_vfs(inode); - } + do { + len = PAGE_CACHE_SIZE - ofs; + if (len > bytes) + len = bytes; + copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, + len); + total += copied; + bytes -= copied; + if (!bytes) + break; + iov_iter_advance(&data, copied); + if (copied < len) + goto err; + ofs = 0; + } while (++pages < last_page); +out: + return total; +err: + /* Zero the rest of the target like __copy_from_user(). */ + len = PAGE_CACHE_SIZE - copied; + do { + if (len > bytes) + len = bytes; + zero_user(*pages, copied, len); + bytes -= len; + copied = 0; + len = PAGE_CACHE_SIZE; + } while (++pages < last_page); + goto out; } /** - * ntfs_file_buffered_write - - * - * Locking: The vfs is holding ->i_mutex on the inode. 
+ * ntfs_perform_write - perform buffered write to a file + * @file: file to write to + * @i: iov_iter with data to write + * @pos: byte offset in file at which to begin writing to */ -static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, - const struct iovec *iov, unsigned long nr_segs, - loff_t pos, loff_t *ppos, size_t count) +static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, + loff_t pos) { - struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *vi = mapping->host; ntfs_inode *ni = NTFS_I(vi); ntfs_volume *vol = ni->vol; struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER]; struct page *cached_page = NULL; - char __user *buf = NULL; - s64 end, ll; VCN last_vcn; LCN lcn; - unsigned long flags; - size_t bytes, iov_ofs = 0; /* Offset in the current iovec. */ - ssize_t status, written; + size_t bytes; + ssize_t status, written = 0; unsigned nr_pages; - int err; - ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, " - "pos 0x%llx, count 0x%lx.", - vi->i_ino, (unsigned)le32_to_cpu(ni->type), - (unsigned long long)pos, (unsigned long)count); - if (unlikely(!count)) - return 0; - BUG_ON(NInoMstProtected(ni)); - /* - * If the attribute is not an index root and it is encrypted or - * compressed, we cannot write to it yet. Note we need to check for - * AT_INDEX_ALLOCATION since this is the type of both directory and - * index inodes. - */ - if (ni->type != AT_INDEX_ALLOCATION) { - /* If file is encrypted, deny access, just like NT4. */ - if (NInoEncrypted(ni)) { - /* - * Reminder for later: Encrypted files are _always_ - * non-resident so that the content can always be - * encrypted. - */ - ntfs_debug("Denying write access to encrypted file."); - return -EACCES; - } - if (NInoCompressed(ni)) { - /* Only unnamed $DATA attribute can be compressed. */ - BUG_ON(ni->type != AT_DATA); - BUG_ON(ni->name_len); - /* - * Reminder for later: If resident, the data is not - * actually compressed. Only on the switch to non- - * resident does compression kick in. This is in - * contrast to encrypted files (see above). - */ - ntfs_error(vi->i_sb, "Writing to compressed files is " - "not implemented yet. Sorry."); - return -EOPNOTSUPP; - } - } + ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos " + "0x%llx, count 0x%lx.", vi->i_ino, + (unsigned)le32_to_cpu(ni->type), + (unsigned long long)pos, + (unsigned long)iov_iter_count(i)); /* * If a previous ntfs_truncate() failed, repeat it and abort if it * fails again. */ if (unlikely(NInoTruncateFailed(ni))) { + int err; + inode_dio_wait(vi); err = ntfs_truncate(vi); if (err || NInoTruncateFailed(ni)) { @@ -1855,81 +1781,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, return err; } } - /* The first byte after the write. */ - end = pos + count; - /* - * If the write goes beyond the allocated size, extend the allocation - * to cover the whole of the write, rounded up to the nearest cluster. - */ - read_lock_irqsave(&ni->size_lock, flags); - ll = ni->allocated_size; - read_unlock_irqrestore(&ni->size_lock, flags); - if (end > ll) { - /* Extend the allocation without changing the data size. */ - ll = ntfs_attr_extend_allocation(ni, end, -1, pos); - if (likely(ll >= 0)) { - BUG_ON(pos >= ll); - /* If the extension was partial truncate the write. 
*/ - if (end > ll) { - ntfs_debug("Truncating write to inode 0x%lx, " - "attribute type 0x%x, because " - "the allocation was only " - "partially extended.", - vi->i_ino, (unsigned) - le32_to_cpu(ni->type)); - end = ll; - count = ll - pos; - } - } else { - err = ll; - read_lock_irqsave(&ni->size_lock, flags); - ll = ni->allocated_size; - read_unlock_irqrestore(&ni->size_lock, flags); - /* Perform a partial write if possible or fail. */ - if (pos < ll) { - ntfs_debug("Truncating write to inode 0x%lx, " - "attribute type 0x%x, because " - "extending the allocation " - "failed (error code %i).", - vi->i_ino, (unsigned) - le32_to_cpu(ni->type), err); - end = ll; - count = ll - pos; - } else { - ntfs_error(vol->sb, "Cannot perform write to " - "inode 0x%lx, attribute type " - "0x%x, because extending the " - "allocation failed (error " - "code %i).", vi->i_ino, - (unsigned) - le32_to_cpu(ni->type), err); - return err; - } - } - } - written = 0; - /* - * If the write starts beyond the initialized size, extend it up to the - * beginning of the write and initialize all non-sparse space between - * the old initialized size and the new one. This automatically also - * increments the vfs inode->i_size to keep it above or equal to the - * initialized_size. - */ - read_lock_irqsave(&ni->size_lock, flags); - ll = ni->initialized_size; - read_unlock_irqrestore(&ni->size_lock, flags); - if (pos > ll) { - err = ntfs_attr_extend_initialized(ni, pos); - if (err < 0) { - ntfs_error(vol->sb, "Cannot perform write to inode " - "0x%lx, attribute type 0x%x, because " - "extending the initialized size " - "failed (error code %i).", vi->i_ino, - (unsigned)le32_to_cpu(ni->type), err); - status = err; - goto err_out; - } - } /* * Determine the number of pages per cluster for non-resident * attributes. @@ -1937,10 +1788,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, nr_pages = 1; if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; - /* Finally, perform the actual write. */ last_vcn = -1; - if (likely(nr_segs == 1)) - buf = iov->iov_base; do { VCN vcn; pgoff_t idx, start_idx; @@ -1965,10 +1813,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, vol->cluster_size_bits, false); up_read(&ni->runlist.lock); if (unlikely(lcn < LCN_HOLE)) { - status = -EIO; if (lcn == LCN_ENOMEM) status = -ENOMEM; - else + else { + status = -EIO; ntfs_error(vol->sb, "Cannot " "perform write to " "inode 0x%lx, " @@ -1977,6 +1825,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, "is corrupt.", vi->i_ino, (unsigned) le32_to_cpu(ni->type)); + } break; } if (lcn == LCN_HOLE) { @@ -1989,8 +1838,9 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, } } } - if (bytes > count) - bytes = count; + if (bytes > iov_iter_count(i)) + bytes = iov_iter_count(i); +again: /* * Bring in the user page(s) that we will copy from _first_. * Otherwise there is a nasty deadlock on copying from the same @@ -1999,10 +1849,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, * pages being swapped out between us bringing them into memory * and doing the actual copying. */ - if (likely(nr_segs == 1)) - ntfs_fault_in_pages_readable(buf, bytes); - else - ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes); + if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) { + status = -EFAULT; + break; + } /* Get and lock @do_pages starting at index @start_idx. 
*/ status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages, pages, &cached_page); @@ -2018,56 +1868,57 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, status = ntfs_prepare_pages_for_non_resident_write( pages, do_pages, pos, bytes); if (unlikely(status)) { - loff_t i_size; - do { unlock_page(pages[--do_pages]); page_cache_release(pages[do_pages]); } while (do_pages); - /* - * The write preparation may have instantiated - * allocated space outside i_size. Trim this - * off again. We can ignore any errors in this - * case as we will just be waisting a bit of - * allocated space, which is not a disaster. - */ - i_size = i_size_read(vi); - if (pos + bytes > i_size) { - ntfs_write_failed(mapping, pos + bytes); - } break; } } u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; - if (likely(nr_segs == 1)) { - copied = ntfs_copy_from_user(pages + u, do_pages - u, - ofs, buf, bytes); - buf += copied; - } else - copied = ntfs_copy_from_user_iovec(pages + u, - do_pages - u, ofs, &iov, &iov_ofs, - bytes); + copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, + i, bytes); ntfs_flush_dcache_pages(pages + u, do_pages - u); - status = ntfs_commit_pages_after_write(pages, do_pages, pos, - bytes); - if (likely(!status)) { - written += copied; - count -= copied; - pos += copied; - if (unlikely(copied != bytes)) - status = -EFAULT; + status = 0; + if (likely(copied == bytes)) { + status = ntfs_commit_pages_after_write(pages, do_pages, + pos, bytes); + if (!status) + status = bytes; } do { unlock_page(pages[--do_pages]); page_cache_release(pages[do_pages]); } while (do_pages); - if (unlikely(status)) + if (unlikely(status < 0)) break; - balance_dirty_pages_ratelimited(mapping); + copied = status; cond_resched(); - } while (count); -err_out: - *ppos = pos; + if (unlikely(!copied)) { + size_t sc; + + /* + * We failed to copy anything. Fall back to single + * segment length write. + * + * This is needed to avoid possible livelock in the + * case that all segments in the iov cannot be copied + * at once without a pagefault. + */ + sc = iov_iter_single_seg_count(i); + if (bytes > sc) + bytes = sc; + goto again; + } + iov_iter_advance(i, copied); + pos += copied; + written += copied; + balance_dirty_pages_ratelimited(mapping); + if (fatal_signal_pending(current)) { + status = -EINTR; + break; + } + } while (iov_iter_count(i)); if (cached_page) page_cache_release(cached_page); ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", @@ -2077,63 +1928,36 @@ err_out: } /** - * ntfs_file_aio_write_nolock - + * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock() + * @iocb: IO state structure + * @from: iov_iter with data to write + * + * Basically the same as generic_file_write_iter() except that it ends up + * up calling ntfs_perform_write() instead of generic_perform_write() and that + * O_DIRECT is not implemented. */ -static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb, - const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) +static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - loff_t pos; - size_t count; /* after file limit checks */ - ssize_t written, err; + struct inode *vi = file_inode(file); + ssize_t written = 0; + ssize_t err; - count = iov_length(iov, nr_segs); - pos = *ppos; + mutex_lock(&vi->i_mutex); /* We can write back this queue in page reclaim. 
*/ - current->backing_dev_info = inode_to_bdi(inode); - written = 0; - err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); - if (err) - goto out; - if (!count) - goto out; - err = file_remove_suid(file); - if (err) - goto out; - err = file_update_time(file); - if (err) - goto out; - written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos, - count); -out: + current->backing_dev_info = inode_to_bdi(vi); + err = ntfs_prepare_file_for_write(iocb, from); + if (iov_iter_count(from) && !err) + written = ntfs_perform_write(file, from, iocb->ki_pos); current->backing_dev_info = NULL; - return written ? written : err; -} - -/** - * ntfs_file_aio_write - - */ -static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) -{ - struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - ssize_t ret; - - BUG_ON(iocb->ki_pos != pos); - - mutex_lock(&inode->i_mutex); - ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); - mutex_unlock(&inode->i_mutex); - if (ret > 0) { - int err = generic_write_sync(file, iocb->ki_pos - ret, ret); + mutex_unlock(&vi->i_mutex); + if (likely(written > 0)) { + err = generic_write_sync(file, iocb->ki_pos, written); if (err < 0) - ret = err; + written = 0; } - return ret; + iocb->ki_pos += written; + return written ? written : err; } /** @@ -2197,37 +2021,15 @@ static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end, #endif /* NTFS_RW */ const struct file_operations ntfs_file_ops = { - .llseek = generic_file_llseek, /* Seek inside file. */ - .read = new_sync_read, /* Read from file. */ - .read_iter = generic_file_read_iter, /* Async read from file. */ + .llseek = generic_file_llseek, + .read_iter = generic_file_read_iter, #ifdef NTFS_RW - .write = do_sync_write, /* Write to file. */ - .aio_write = ntfs_file_aio_write, /* Async write to file. */ - /*.release = ,*/ /* Last file is closed. See - fs/ext2/file.c:: - ext2_release_file() for - how to use this to discard - preallocated space for - write opened files. */ - .fsync = ntfs_file_fsync, /* Sync a file to disk. */ - /*.aio_fsync = ,*/ /* Sync all outstanding async - i/o operations on a - kiocb. */ + .write_iter = ntfs_file_write_iter, + .fsync = ntfs_file_fsync, #endif /* NTFS_RW */ - /*.ioctl = ,*/ /* Perform function on the - mounted filesystem. */ - .mmap = generic_file_mmap, /* Mmap file. */ - .open = ntfs_file_open, /* Open file. */ - .splice_read = generic_file_splice_read /* Zero-copy data send with - the data source being on - the ntfs partition. We do - not need to care about the - data destination. */ - /*.sendpage = ,*/ /* Zero-copy data send with - the data destination being - on the ntfs partition. We - do not need to care about - the data source. 
*/ + .mmap = generic_file_mmap, + .open = ntfs_file_open, + .splice_read = generic_file_splice_read, }; const struct inode_operations ntfs_file_inode_ops = { diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 898b994..d284f07 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -28,7 +28,6 @@ #include <linux/quotaops.h> #include <linux/slab.h> #include <linux/log2.h> -#include <linux/aio.h> #include "aops.h" #include "attrib.h" @@ -2890,7 +2889,7 @@ void ntfs_truncate_vfs(struct inode *vi) { */ int ntfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *vi = dentry->d_inode; + struct inode *vi = d_inode(dentry); int err; unsigned int ia_valid = attr->ia_valid; diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c index b3973c2..0f35b80 100644 --- a/fs/ntfs/namei.c +++ b/fs/ntfs/namei.c @@ -292,14 +292,14 @@ const struct inode_operations ntfs_dir_inode_ops = { * The code is based on the ext3 ->get_parent() implementation found in * fs/ext3/namei.c::ext3_get_parent(). * - * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down. + * Note: ntfs_get_parent() is called with @d_inode(child_dent)->i_mutex down. * * Return the dentry of the parent directory on success or the error code on * error (IS_ERR() is true). */ static struct dentry *ntfs_get_parent(struct dentry *child_dent) { - struct inode *vi = child_dent->d_inode; + struct inode *vi = d_inode(child_dent); ntfs_inode *ni = NTFS_I(vi); MFT_RECORD *mrec; ntfs_attr_search_ctx *ctx; diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 044158b..2d7f76e 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -3370,7 +3370,7 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path, ret = ocfs2_get_right_path(et, left_path, &right_path); if (ret) { mlog_errno(ret); - goto out; + return ret; } right_el = path_leaf_el(right_path); @@ -3453,8 +3453,7 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path, subtree_index); } out: - if (right_path) - ocfs2_free_path(right_path); + ocfs2_free_path(right_path); return ret; } @@ -3536,7 +3535,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path, ret = ocfs2_get_left_path(et, right_path, &left_path); if (ret) { mlog_errno(ret); - goto out; + return ret; } left_el = path_leaf_el(left_path); @@ -3647,8 +3646,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path, right_path, subtree_index); } out: - if (left_path) - ocfs2_free_path(left_path); + ocfs2_free_path(left_path); return ret; } @@ -4334,17 +4332,17 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, } else if (path->p_tree_depth > 0) { status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos); if (status) - goto out; + goto exit; if (left_cpos != 0) { left_path = ocfs2_new_path_from_path(path); if (!left_path) - goto out; + goto exit; status = ocfs2_find_path(et->et_ci, left_path, left_cpos); if (status) - goto out; + goto free_left_path; new_el = path_leaf_el(left_path); @@ -4361,7 +4359,7 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, le16_to_cpu(new_el->l_next_free_rec), le16_to_cpu(new_el->l_count)); status = -EINVAL; - goto out; + goto free_left_path; } rec = &new_el->l_recs[ le16_to_cpu(new_el->l_next_free_rec) - 1]; @@ -4388,18 +4386,18 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, path->p_tree_depth > 0) { status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos); if (status) - goto out; + goto free_left_path; if (right_cpos == 0) - goto out; + goto free_left_path; right_path = 
ocfs2_new_path_from_path(path); if (!right_path) - goto out; + goto free_left_path; status = ocfs2_find_path(et->et_ci, right_path, right_cpos); if (status) - goto out; + goto free_right_path; new_el = path_leaf_el(right_path); rec = &new_el->l_recs[0]; @@ -4413,7 +4411,7 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, (unsigned long long)le64_to_cpu(eb->h_blkno), le16_to_cpu(new_el->l_next_free_rec)); status = -EINVAL; - goto out; + goto free_right_path; } rec = &new_el->l_recs[1]; } @@ -4430,12 +4428,11 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, ret = contig_type; } -out: - if (left_path) - ocfs2_free_path(left_path); - if (right_path) - ocfs2_free_path(right_path); - +free_right_path: + ocfs2_free_path(right_path); +free_left_path: + ocfs2_free_path(left_path); +exit: return ret; } @@ -6858,13 +6855,13 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, if (pages == NULL) { ret = -ENOMEM; mlog_errno(ret); - goto out; + return ret; } ret = ocfs2_reserve_clusters(osb, 1, &data_ac); if (ret) { mlog_errno(ret); - goto out; + goto free_pages; } } @@ -6996,9 +6993,8 @@ out_commit: out: if (data_ac) ocfs2_free_alloc_context(data_ac); - if (pages) - kfree(pages); - +free_pages: + kfree(pages); return ret; } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 44db180..f906a25 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -29,6 +29,7 @@ #include <linux/mpage.h> #include <linux/quotaops.h> #include <linux/blkdev.h> +#include <linux/uio.h> #include <cluster/masklog.h> @@ -663,6 +664,117 @@ static int ocfs2_is_overwrite(struct ocfs2_super *osb, return 0; } +static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb, + struct inode *inode, loff_t offset, + u64 zero_len, int cluster_align) +{ + u32 p_cpos = 0; + u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode)); + unsigned int num_clusters = 0; + unsigned int ext_flags = 0; + int ret = 0; + + if (offset <= i_size_read(inode) || cluster_align) + return 0; + + ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters, + &ext_flags); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { + u64 s = i_size_read(inode); + sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) + + (do_div(s, osb->s_clustersize) >> 9); + + ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, + zero_len >> 9, GFP_NOFS, false); + if (ret < 0) + mlog_errno(ret); + } + + return ret; +} + +static int ocfs2_direct_IO_extend_no_holes(struct ocfs2_super *osb, + struct inode *inode, loff_t offset) +{ + u64 zero_start, zero_len, total_zero_len; + u32 p_cpos = 0, clusters_to_add; + u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode)); + unsigned int num_clusters = 0; + unsigned int ext_flags = 0; + u32 size_div, offset_div; + int ret = 0; + + { + u64 o = offset; + u64 s = i_size_read(inode); + + offset_div = do_div(o, osb->s_clustersize); + size_div = do_div(s, osb->s_clustersize); + } + + if (offset <= i_size_read(inode)) + return 0; + + clusters_to_add = ocfs2_bytes_to_clusters(inode->i_sb, offset) - + ocfs2_bytes_to_clusters(inode->i_sb, i_size_read(inode)); + total_zero_len = offset - i_size_read(inode); + if (clusters_to_add) + total_zero_len -= offset_div; + + /* Allocate clusters to fill out holes, and this is only needed + * when we add more than one clusters. 
Otherwise the cluster will + * be allocated during direct IO */ + if (clusters_to_add > 1) { + ret = ocfs2_extend_allocation(inode, + OCFS2_I(inode)->ip_clusters, + clusters_to_add - 1, 0); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + while (total_zero_len) { + ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters, + &ext_flags); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + zero_start = ocfs2_clusters_to_bytes(osb->sb, p_cpos) + + size_div; + zero_len = ocfs2_clusters_to_bytes(osb->sb, num_clusters) - + size_div; + zero_len = min(total_zero_len, zero_len); + + if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { + ret = blkdev_issue_zeroout(osb->sb->s_bdev, + zero_start >> 9, zero_len >> 9, + GFP_NOFS, false); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + + total_zero_len -= zero_len; + v_cpos += ocfs2_bytes_to_clusters(osb->sb, zero_len + size_div); + + /* Only at first iteration can be cluster not aligned. + * So set size_div to 0 for the rest */ + size_div = 0; + } + +out: + return ret; +} + static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) @@ -677,8 +789,8 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, struct buffer_head *di_bh = NULL; size_t count = iter->count; journal_t *journal = osb->journal->j_journal; - u32 zero_len; - int cluster_align; + u64 zero_len_head, zero_len_tail; + int cluster_align_head, cluster_align_tail; loff_t final_size = offset + count; int append_write = offset >= i_size_read(inode) ? 1 : 0; unsigned int num_clusters = 0; @@ -686,9 +798,16 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, { u64 o = offset; + u64 s = i_size_read(inode); + + zero_len_head = do_div(o, 1 << osb->s_clustersize_bits); + cluster_align_head = !zero_len_head; - zero_len = do_div(o, 1 << osb->s_clustersize_bits); - cluster_align = !zero_len; + zero_len_tail = osb->s_clustersize - + do_div(s, osb->s_clustersize); + if ((offset - i_size_read(inode)) < zero_len_tail) + zero_len_tail = offset - i_size_read(inode); + cluster_align_tail = !zero_len_tail; } /* @@ -706,21 +825,23 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, } if (append_write) { - ret = ocfs2_inode_lock(inode, &di_bh, 1); + ret = ocfs2_inode_lock(inode, NULL, 1); if (ret < 0) { mlog_errno(ret); goto clean_orphan; } + /* zeroing out the previously allocated cluster tail + * that but not zeroed */ if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) - ret = ocfs2_zero_extend(inode, di_bh, offset); + ret = ocfs2_direct_IO_zero_extend(osb, inode, offset, + zero_len_tail, cluster_align_tail); else - ret = ocfs2_extend_no_holes(inode, di_bh, offset, + ret = ocfs2_direct_IO_extend_no_holes(osb, inode, offset); if (ret < 0) { mlog_errno(ret); ocfs2_inode_unlock(inode, 1); - brelse(di_bh); goto clean_orphan; } @@ -728,19 +849,15 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, if (is_overwrite < 0) { mlog_errno(is_overwrite); ocfs2_inode_unlock(inode, 1); - brelse(di_bh); goto clean_orphan; } ocfs2_inode_unlock(inode, 1); - brelse(di_bh); - di_bh = NULL; } - written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev, - iter, offset, - ocfs2_direct_IO_get_blocks, - ocfs2_dio_end_io, NULL, 0); + written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, + offset, ocfs2_direct_IO_get_blocks, + ocfs2_dio_end_io, NULL, 0); if (unlikely(written < 0)) { loff_t i_size = i_size_read(inode); @@ -771,15 +888,23 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, if (ret < 0) 
mlog_errno(ret); } - } else if (written < 0 && append_write && !is_overwrite && - !cluster_align) { + } else if (written > 0 && append_write && !is_overwrite && + !cluster_align_head) { + /* zeroing out the allocated cluster head */ u32 p_cpos = 0; u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset); + ret = ocfs2_inode_lock(inode, NULL, 0); + if (ret < 0) { + mlog_errno(ret); + goto clean_orphan; + } + ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters, &ext_flags); if (ret < 0) { mlog_errno(ret); + ocfs2_inode_unlock(inode, 0); goto clean_orphan; } @@ -787,9 +912,11 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, ret = blkdev_issue_zeroout(osb->sb->s_bdev, p_cpos << (osb->s_clustersize_bits - 9), - zero_len >> 9, GFP_KERNEL, false); + zero_len_head >> 9, GFP_NOFS, false); if (ret < 0) mlog_errno(ret); + + ocfs2_inode_unlock(inode, 0); } clean_orphan: @@ -818,9 +945,7 @@ out: return ret; } -static ssize_t ocfs2_direct_IO(int rw, - struct kiocb *iocb, - struct iov_iter *iter, +static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; @@ -842,12 +967,11 @@ static ssize_t ocfs2_direct_IO(int rw, if (i_size_read(inode) <= offset && !full_coherency) return 0; - if (rw == READ) - return __blockdev_direct_IO(rw, iocb, inode, - inode->i_sb->s_bdev, - iter, offset, - ocfs2_direct_IO_get_blocks, - ocfs2_dio_end_io, NULL, 0); + if (iov_iter_rw(iter) == READ) + return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, + iter, offset, + ocfs2_direct_IO_get_blocks, + ocfs2_dio_end_io, NULL, 0); else return ocfs2_direct_IO_write(iocb, iter, offset); } diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 6cae155..dd59599 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -22,7 +22,7 @@ #ifndef OCFS2_AOPS_H #define OCFS2_AOPS_H -#include <linux/aio.h> +#include <linux/fs.h> handle_t *ocfs2_start_walk_page_trans(struct inode *inode, struct page *page, diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index 2260fb9..7fdc25a 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -196,13 +196,14 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits; } \ } while (0) -#define mlog_errno(st) do { \ +#define mlog_errno(st) ({ \ int _st = (st); \ if (_st != -ERESTARTSYS && _st != -EINTR && \ _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC && \ _st != -EDQUOT) \ mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ -} while (0) + _st; \ +}) #define mlog_bug_on_msg(cond, fmt, args...) 
do { \ if (cond) { \ diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 4fda7a5..2903730 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -42,8 +42,8 @@ void ocfs2_dentry_attach_gen(struct dentry *dentry) { unsigned long gen = - OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; - BUG_ON(dentry->d_inode); + OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen; + BUG_ON(d_inode(dentry)); dentry->d_fsdata = (void *)gen; } @@ -57,7 +57,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - inode = dentry->d_inode; + inode = d_inode(dentry); osb = OCFS2_SB(dentry->d_sb); trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len, @@ -71,7 +71,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags) unsigned long gen = (unsigned long) dentry->d_fsdata; unsigned long pgen; spin_lock(&dentry->d_lock); - pgen = OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; + pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen; spin_unlock(&dentry->d_lock); trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len, dentry->d_name.name, @@ -146,7 +146,7 @@ static int ocfs2_match_dentry(struct dentry *dentry, if (skip_unhashed && d_unhashed(dentry)) return 0; - parent = dentry->d_parent->d_inode; + parent = d_inode(dentry->d_parent); /* Negative parent dentry? */ if (!parent) return 0; @@ -243,7 +243,7 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, if (!inode) return 0; - if (!dentry->d_inode && dentry->d_fsdata) { + if (d_really_is_negative(dentry) && dentry->d_fsdata) { /* Converting a negative dentry to positive Clear dentry->d_fsdata */ dentry->d_fsdata = dl = NULL; @@ -446,7 +446,7 @@ void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target, { int ret; struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb); - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); /* * Move within the same directory, so the actual lock info won't diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index b08050b..ccd4dcf 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -18,7 +18,7 @@ * * linux/fs/minix/dir.c * - * Copyright (C) 1991, 1992 Linux Torvalds + * Copyright (C) 1991, 1992 Linus Torvalds * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public @@ -2047,22 +2047,19 @@ int ocfs2_check_dir_for_entry(struct inode *dir, const char *name, int namelen) { - int ret; + int ret = 0; struct ocfs2_dir_lookup_result lookup = { NULL, }; trace_ocfs2_check_dir_for_entry( (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); - ret = -EEXIST; - if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) - goto bail; + if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) { + ret = -EEXIST; + mlog_errno(ret); + } - ret = 0; -bail: ocfs2_free_dir_lookup_result(&lookup); - if (ret) - mlog_errno(ret); return ret; } diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h index f0344b7..3d8639f 100644 --- a/fs/ocfs2/dir.h +++ b/fs/ocfs2/dir.h @@ -72,7 +72,7 @@ static inline int ocfs2_add_entry(handle_t *handle, struct buffer_head *parent_fe_bh, struct ocfs2_dir_lookup_result *lookup) { - return __ocfs2_add_entry(handle, dentry->d_parent->d_inode, + return __ocfs2_add_entry(handle, d_inode(dentry->d_parent), dentry->d_name.name, dentry->d_name.len, inode, blkno, parent_fe_bh, lookup); } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 061ba6a..b5cf27d 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ 
-208,7 +208,7 @@ static int dlmfs_file_release(struct inode *inode, static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr) { int error; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); attr->ia_valid &= ~ATTR_SIZE; error = inode_change_ok(inode, attr); @@ -549,7 +549,7 @@ static int dlmfs_unlink(struct inode *dir, struct dentry *dentry) { int status; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); mlog(0, "unlink inode %lu\n", inode->i_ino); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 11849a4..8b23aa2 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1391,6 +1391,11 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, int noqueue_attempted = 0; int dlm_locked = 0; + if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) { + mlog_errno(-EINVAL); + return -EINVAL; + } + ocfs2_init_mask_waiter(&mw); if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 2965116..827fc98 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -82,7 +82,6 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb, } status = ocfs2_test_inode_bit(osb, blkno, &set); - trace_ocfs2_get_dentry_test_bit(status, set); if (status < 0) { if (status == -EINVAL) { /* @@ -96,6 +95,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb, goto unlock_nfs_sync; } + trace_ocfs2_get_dentry_test_bit(status, set); /* If the inode allocator bit is clear, this inode must be stale */ if (!set) { status = -ESTALE; @@ -147,7 +147,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) int status; u64 blkno; struct dentry *parent; - struct inode *dir = child->d_inode; + struct inode *dir = d_inode(child); trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 46e0d4e..d8b670c 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1126,7 +1126,7 @@ out: int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) { int status = 0, size_change; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct ocfs2_super *osb = OCFS2_SB(sb); struct buffer_head *bh = NULL; @@ -1275,8 +1275,8 @@ int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; - struct super_block *sb = dentry->d_inode->i_sb; + struct inode *inode = d_inode(dentry); + struct super_block *sb = d_inode(dentry)->i_sb; struct ocfs2_super *osb = sb->s_fs_info; int err; @@ -2106,7 +2106,7 @@ out: } static int ocfs2_prepare_inode_for_write(struct file *file, - loff_t *ppos, + loff_t pos, size_t count, int appending, int *direct_io, @@ -2114,8 +2114,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file, { int ret = 0, meta_level = 0; struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; - loff_t saved_pos = 0, end; + struct inode *inode = d_inode(dentry); + loff_t end; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int full_coherency = !(osb->s_mount_opt & OCFS2_MOUNT_COHERENCY_BUFFERED); @@ -2155,23 +2155,16 @@ static int ocfs2_prepare_inode_for_write(struct file *file, } } - /* work on a copy of ppos until we're sure that we won't have - * to recalculate it due to relocking. 
*/ - if (appending) - saved_pos = i_size_read(inode); - else - saved_pos = *ppos; - - end = saved_pos + count; + end = pos + count; - ret = ocfs2_check_range_for_refcount(inode, saved_pos, count); + ret = ocfs2_check_range_for_refcount(inode, pos, count); if (ret == 1) { ocfs2_inode_unlock(inode, meta_level); meta_level = -1; ret = ocfs2_prepare_inode_for_refcount(inode, file, - saved_pos, + pos, count, &meta_level); if (has_refcount) @@ -2227,7 +2220,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file, * caller will have to retake some cluster * locks and initiate the io as buffered. */ - ret = ocfs2_check_range_for_holes(inode, saved_pos, count); + ret = ocfs2_check_range_for_holes(inode, pos, count); if (ret == 1) { /* * Fallback to old way if the feature bit is not set. @@ -2242,12 +2235,9 @@ static int ocfs2_prepare_inode_for_write(struct file *file, break; } - if (appending) - *ppos = saved_pos; - out_unlock: trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, - saved_pos, appending, count, + pos, appending, count, direct_io, has_refcount); if (meta_level >= 0) @@ -2260,19 +2250,20 @@ out: static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { - int ret, direct_io, appending, rw_level, have_alloc_sem = 0; + int direct_io, appending, rw_level, have_alloc_sem = 0; int can_do_direct, has_refcount = 0; ssize_t written = 0; - size_t count = iov_iter_count(from); - loff_t old_size, *ppos = &iocb->ki_pos; + ssize_t ret; + size_t count = iov_iter_count(from), orig_count; + loff_t old_size; u32 old_clusters; struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); - struct address_space *mapping = file->f_mapping; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int full_coherency = !(osb->s_mount_opt & OCFS2_MOUNT_COHERENCY_BUFFERED); int unaligned_dio = 0; + int dropped_dio = 0; trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, @@ -2280,11 +2271,11 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, file->f_path.dentry->d_name.name, (unsigned int)from->nr_segs); /* GRRRRR */ - if (iocb->ki_nbytes == 0) + if (count == 0) return 0; - appending = file->f_flags & O_APPEND ? 1 : 0; - direct_io = file->f_flags & O_DIRECT ? 1 : 0; + appending = iocb->ki_flags & IOCB_APPEND ? 1 : 0; + direct_io = iocb->ki_flags & IOCB_DIRECT ? 
1 : 0; mutex_lock(&inode->i_mutex); @@ -2329,9 +2320,17 @@ relock: ocfs2_inode_unlock(inode, 1); } + orig_count = iov_iter_count(from); + ret = generic_write_checks(iocb, from); + if (ret <= 0) { + if (ret) + mlog_errno(ret); + goto out; + } + count = ret; + can_do_direct = direct_io; - ret = ocfs2_prepare_inode_for_write(file, ppos, - iocb->ki_nbytes, appending, + ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, appending, &can_do_direct, &has_refcount); if (ret < 0) { mlog_errno(ret); @@ -2339,8 +2338,7 @@ relock: } if (direct_io && !is_sync_kiocb(iocb)) - unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_nbytes, - *ppos); + unaligned_dio = ocfs2_is_io_unaligned(inode, count, iocb->ki_pos); /* * We can't complete the direct I/O as requested, fall back to @@ -2353,6 +2351,9 @@ relock: rw_level = -1; direct_io = 0; + iocb->ki_flags &= ~IOCB_DIRECT; + iov_iter_reexpand(from, orig_count); + dropped_dio = 1; goto relock; } @@ -2376,74 +2377,18 @@ relock: /* communicate with ocfs2_dio_end_io */ ocfs2_iocb_set_rw_locked(iocb, rw_level); - ret = generic_write_checks(file, ppos, &count, - S_ISBLK(inode->i_mode)); - if (ret) - goto out_dio; - - iov_iter_truncate(from, count); - if (direct_io) { - loff_t endbyte; - ssize_t written_buffered; - written = generic_file_direct_write(iocb, from, *ppos); - if (written < 0 || written == count) { - ret = written; - goto out_dio; - } - - /* - * for completing the rest of the request. - */ - *ppos += written; - count -= written; - written_buffered = generic_perform_write(file, from, *ppos); - /* - * If generic_file_buffered_write() returned a synchronous error - * then we want to return the number of bytes which were - * direct-written, or the error code if that was zero. Note - * that this differs from normal direct-io semantics, which - * will return -EFOO even if some bytes were written. - */ - if (written_buffered < 0) { - ret = written_buffered; - goto out_dio; - } - - iocb->ki_pos = *ppos + written_buffered; - /* We need to ensure that the page cache pages are written to - * disk and invalidated to preserve the expected O_DIRECT - * semantics. 
- */ - endbyte = *ppos + written_buffered - 1; - ret = filemap_write_and_wait_range(file->f_mapping, *ppos, - endbyte); - if (ret == 0) { - written += written_buffered; - invalidate_mapping_pages(mapping, - *ppos >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); - } else { - /* - * We don't know how much we wrote, so just return - * the number of bytes which were direct-written - */ - } - } else { - current->backing_dev_info = inode_to_bdi(inode); - written = generic_perform_write(file, from, *ppos); - if (likely(written >= 0)) - iocb->ki_pos = *ppos + written; - current->backing_dev_info = NULL; - } - -out_dio: + written = __generic_file_write_iter(iocb, from); /* buffered aio wouldn't have proper lock coverage today */ - BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); + BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT)); + + if (unlikely(written <= 0)) + goto no_sync; - if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || - ((file->f_flags & O_DIRECT) && !direct_io)) { - ret = filemap_fdatawrite_range(file->f_mapping, *ppos, - *ppos + count - 1); + if (((file->f_flags & O_DSYNC) && !direct_io) || + IS_SYNC(inode) || dropped_dio) { + ret = filemap_fdatawrite_range(file->f_mapping, + iocb->ki_pos - written, + iocb->ki_pos - 1); if (ret < 0) written = ret; @@ -2454,10 +2399,12 @@ out_dio: } if (!ret) - ret = filemap_fdatawait_range(file->f_mapping, *ppos, - *ppos + count - 1); + ret = filemap_fdatawait_range(file->f_mapping, + iocb->ki_pos - written, + iocb->ki_pos - 1); } +no_sync: /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that @@ -2549,7 +2496,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb, * buffered reads protect themselves in ->readpage(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. 
*/ - if (filp->f_flags & O_DIRECT) { + if (iocb->ki_flags & IOCB_DIRECT) { have_alloc_sem = 1; ocfs2_iocb_set_sem_locked(iocb); @@ -2583,7 +2530,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb, trace_generic_file_aio_read_ret(ret); /* buffered aio wouldn't have proper lock coverage today */ - BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); + BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT)); /* see ocfs2_file_write_iter */ if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { @@ -2678,8 +2625,6 @@ const struct inode_operations ocfs2_special_file_iops = { */ const struct file_operations ocfs2_fops = { .llseek = ocfs2_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .mmap = ocfs2_mmap, .fsync = ocfs2_sync_file, .release = ocfs2_file_release, @@ -2726,8 +2671,6 @@ const struct file_operations ocfs2_dops = { */ const struct file_operations ocfs2_fops_no_plocks = { .llseek = ocfs2_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .mmap = ocfs2_mmap, .fsync = ocfs2_sync_file, .release = ocfs2_file_release, diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 3025c0d..b254416 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -624,7 +624,7 @@ static int ocfs2_remove_inode(struct inode *inode, ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, le16_to_cpu(di->i_suballoc_slot)); if (!inode_alloc_inode) { - status = -EEXIST; + status = -ENOENT; mlog_errno(status); goto bail; } @@ -742,7 +742,7 @@ static int ocfs2_wipe_inode(struct inode *inode, ORPHAN_DIR_SYSTEM_INODE, orphaned_slot); if (!orphan_dir_inode) { - status = -EEXIST; + status = -ENOENT; mlog_errno(status); goto bail; } @@ -1209,7 +1209,7 @@ int ocfs2_drop_inode(struct inode *inode) */ int ocfs2_inode_revalidate(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int status = 0; trace_ocfs2_inode_revalidate(inode, diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 0440134..857bbbc 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -666,7 +666,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, if (le32_to_cpu(alloc->id1.bitmap1.i_used) != ocfs2_local_alloc_count_bits(alloc)) { ocfs2_error(osb->sb, "local alloc inode %llu says it has " - "%u free bits, but a count shows %u", + "%u used bits, but a count shows %u", (unsigned long long)le64_to_cpu(alloc->i_blkno), le32_to_cpu(alloc->id1.bitmap1.i_used), ocfs2_local_alloc_count_bits(alloc)); @@ -839,7 +839,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, u32 *numbits, struct ocfs2_alloc_reservation *resv) { - int numfound, bitoff, left, startoff, lastzero; + int numfound = 0, bitoff, left, startoff, lastzero; int local_resv = 0; struct ocfs2_alloc_reservation r; void *bitmap = NULL; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index b5c3a5e..176fe6a 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -689,8 +689,8 @@ static int ocfs2_link(struct dentry *old_dentry, struct dentry *dentry) { handle_t *handle; - struct inode *inode = old_dentry->d_inode; - struct inode *old_dir = old_dentry->d_parent->d_inode; + struct inode *inode = d_inode(old_dentry); + struct inode *old_dir = d_inode(old_dentry->d_parent); int err; struct buffer_head *fe_bh = NULL; struct buffer_head *old_dir_bh = NULL; @@ -879,7 +879,7 @@ static int ocfs2_unlink(struct inode *dir, int status; int child_locked = 0; bool is_unlinkable = false; - struct inode *inode = dentry->d_inode; + struct inode *inode = 
d_inode(dentry); struct inode *orphan_dir = NULL; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); u64 blkno; @@ -898,7 +898,7 @@ static int ocfs2_unlink(struct inode *dir, dquot_initialize(dir); - BUG_ON(dentry->d_parent->d_inode != dir); + BUG_ON(d_inode(dentry->d_parent) != dir); if (inode == osb->root_inode) return -EPERM; @@ -1209,8 +1209,8 @@ static int ocfs2_rename(struct inode *old_dir, { int status = 0, rename_lock = 0, parents_locked = 0, target_exists = 0; int old_child_locked = 0, new_child_locked = 0, update_dot_dot = 0; - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct inode *orphan_dir = NULL; struct ocfs2_dinode *newfe = NULL; char orphan_name[OCFS2_ORPHAN_NAMELEN + 1]; @@ -1454,7 +1454,7 @@ static int ocfs2_rename(struct inode *old_dir, should_add_orphan = true; } } else { - BUG_ON(new_dentry->d_parent->d_inode != new_dir); + BUG_ON(d_inode(new_dentry->d_parent) != new_dir); status = ocfs2_check_dir_for_entry(new_dir, new_dentry->d_name.name, @@ -2322,10 +2322,10 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, trace_ocfs2_orphan_del( (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, - name, namelen); + name, strlen(name)); /* find it's spot in the orphan directory */ - status = ocfs2_find_entry(name, namelen, orphan_dir_inode, + status = ocfs2_find_entry(name, strlen(name), orphan_dir_inode, &lookup); if (status) { mlog_errno(status); @@ -2808,7 +2808,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, ORPHAN_DIR_SYSTEM_INODE, osb->slot_num); if (!orphan_dir_inode) { - status = -EEXIST; + status = -ENOENT; mlog_errno(status); goto leave; } diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index ee541f9..d8c6af1 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4194,7 +4194,7 @@ static int __ocfs2_reflink(struct dentry *old_dentry, bool preserve) { int ret; - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct buffer_head *new_bh = NULL; if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) { @@ -4263,7 +4263,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, bool preserve) { int error; - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct buffer_head *old_bh = NULL; struct inode *new_orphan_inode = NULL; struct posix_acl *default_acl, *acl; @@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) { mlog_errno(error); - goto out; + return error; } error = ocfs2_create_inode_in_orphan(dir, mode, @@ -4357,7 +4357,7 @@ out: /* copied from may_create in VFS. 
*/ static inline int ocfs2_may_create(struct inode *dir, struct dentry *child) { - if (child->d_inode) + if (d_really_is_positive(child)) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; @@ -4375,7 +4375,7 @@ static inline int ocfs2_may_create(struct inode *dir, struct dentry *child) static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, bool preserve) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); int error; if (!inode) @@ -4463,7 +4463,7 @@ int ocfs2_reflink_ioctl(struct inode *inode, } error = ocfs2_vfs_reflink(old_path.dentry, - new_path.dentry->d_inode, + d_inode(new_path.dentry), new_dentry, preserve); out_dput: done_path_create(&new_path, new_dentry); diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index d5493e3..e78a203 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -427,7 +427,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb) if (!si) { status = -ENOMEM; mlog_errno(status); - goto bail; + return status; } si->si_extended = ocfs2_uses_extended_slot_map(osb); @@ -452,7 +452,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb) osb->slot_info = (struct ocfs2_slot_info *)si; bail: - if (status < 0 && si) + if (status < 0) __ocfs2_free_slot_info(si); return status; diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 1724d43..220cae7 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -295,7 +295,7 @@ static int o2cb_cluster_check(void) set_bit(node_num, netmap); if (!memcmp(hbmap, netmap, sizeof(hbmap))) return 0; - if (i < O2CB_MAP_STABILIZE_COUNT) + if (i < O2CB_MAP_STABILIZE_COUNT - 1) msleep(1000); } diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 720aa38..2768eb1 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -1004,10 +1004,8 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn) BUG_ON(conn == NULL); lc = kzalloc(sizeof(struct ocfs2_live_connection), GFP_KERNEL); - if (!lc) { - rc = -ENOMEM; - goto out; - } + if (!lc) + return -ENOMEM; init_waitqueue_head(&lc->oc_wait); init_completion(&lc->oc_sync_wait); @@ -1063,7 +1061,7 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn) } out: - if (rc && lc) + if (rc) kfree(lc); return rc; } diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 0cb889a..4479029 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -2499,6 +2499,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle, alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); + ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh, + start_bit, count); goto bail; } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 2667518..403c566 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2069,6 +2069,8 @@ static int ocfs2_initialize_super(struct super_block *sb, cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits); bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits); sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits); + memcpy(sb->s_uuid, di->id2.i_super.s_uuid, + sizeof(di->id2.i_super.s_uuid)); osb->osb_dx_mask = (1 << (cbits - bbits)) - 1; @@ -2333,7 +2335,7 @@ static int ocfs2_initialize_super(struct super_block *sb, mlog_errno(status); goto bail; } - cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb); + cleancache_init_shared_fs(sb); bail: return status; @@ -2563,22 +2565,22 @@ static void ocfs2_handle_error(struct super_block *sb) ocfs2_set_ro_flag(osb, 0); } -static char 
error_buf[1024]; - -void __ocfs2_error(struct super_block *sb, - const char *function, - const char *fmt, ...) +void __ocfs2_error(struct super_block *sb, const char *function, + const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); - va_end(args); + vaf.fmt = fmt; + vaf.va = &args; /* Not using mlog here because we want to show the actual * function the error came from. */ - printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %s\n", - sb->s_id, function, error_buf); + printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV\n", + sb->s_id, function, &vaf); + + va_end(args); ocfs2_handle_error(sb); } @@ -2586,18 +2588,21 @@ void __ocfs2_error(struct super_block *sb, /* Handle critical errors. This is intentionally more drastic than * ocfs2_handle_error, so we only use for things like journal errors, * etc. */ -void __ocfs2_abort(struct super_block* sb, - const char *function, +void __ocfs2_abort(struct super_block *sb, const char *function, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); - va_end(args); - printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n", - sb->s_id, function, error_buf); + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV\n", + sb->s_id, function, &vaf); + + va_end(args); /* We don't have the cluster support yet to go straight to * hard readonly in here. Until then, we want to keep diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 85b190d..d03bfbf 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1020,7 +1020,7 @@ ssize_t ocfs2_listxattr(struct dentry *dentry, int ret = 0, i_ret = 0, b_ret = 0; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di = NULL; - struct ocfs2_inode_info *oi = OCFS2_I(dentry->d_inode); + struct ocfs2_inode_info *oi = OCFS2_I(d_inode(dentry)); if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb))) return -EOPNOTSUPP; @@ -1028,7 +1028,7 @@ ssize_t ocfs2_listxattr(struct dentry *dentry, if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) return ret; - ret = ocfs2_inode_lock(dentry->d_inode, &di_bh, 0); + ret = ocfs2_inode_lock(d_inode(dentry), &di_bh, 0); if (ret < 0) { mlog_errno(ret); return ret; @@ -1037,7 +1037,7 @@ ssize_t ocfs2_listxattr(struct dentry *dentry, di = (struct ocfs2_dinode *)di_bh->b_data; down_read(&oi->ip_xattr_sem); - i_ret = ocfs2_xattr_ibody_list(dentry->d_inode, di, buffer, size); + i_ret = ocfs2_xattr_ibody_list(d_inode(dentry), di, buffer, size); if (i_ret < 0) b_ret = 0; else { @@ -1045,13 +1045,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry, buffer += i_ret; size -= i_ret; } - b_ret = ocfs2_xattr_block_list(dentry->d_inode, di, + b_ret = ocfs2_xattr_block_list(d_inode(dentry), di, buffer, size); if (b_ret < 0) i_ret = 0; } up_read(&oi->ip_xattr_sem); - ocfs2_inode_unlock(dentry->d_inode, 0); + ocfs2_inode_unlock(d_inode(dentry), 0); brelse(di_bh); @@ -1238,6 +1238,10 @@ static int ocfs2_xattr_block_get(struct inode *inode, i, &block_off, &name_offset); + if (ret) { + mlog_errno(ret); + goto cleanup; + } xs->base = bucket_block(xs->bucket, block_off); } if (ocfs2_xattr_is_local(xs->here)) { @@ -5665,6 +5669,10 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i, &xv, NULL); + if (ret) { + mlog_errno(ret); + break; + } ret = ocfs2_lock_xattr_remove_allocators(inode, xv, args->ref_ci, @@ -7249,7 +7257,7 @@ static int 
ocfs2_xattr_security_get(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY, + return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY, name, buffer, size); } @@ -7259,7 +7267,7 @@ static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name, if (strcmp(name, "") == 0) return -EINVAL; - return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY, + return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY, name, value, size, flags); } @@ -7339,7 +7347,7 @@ static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name, { if (strcmp(name, "") == 0) return -EINVAL; - return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED, + return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED, name, buffer, size); } @@ -7349,7 +7357,7 @@ static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name, if (strcmp(name, "") == 0) return -EINVAL; - return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED, + return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED, name, value, size, flags); } @@ -7391,7 +7399,7 @@ static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name, return -EINVAL; if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR) return -EOPNOTSUPP; - return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_USER, name, + return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_USER, name, buffer, size); } @@ -7405,7 +7413,7 @@ static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name, if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR) return -EOPNOTSUPP; - return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_USER, + return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_USER, name, value, size, flags); } diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c index 1b8e9e8..f833bf8 100644 --- a/fs/omfs/dir.c +++ b/fs/omfs/dir.c @@ -110,7 +110,7 @@ int omfs_make_empty(struct inode *inode, struct super_block *sb) static int omfs_add_link(struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; @@ -155,7 +155,7 @@ out: static int omfs_delete_entry(struct dentry *dentry) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); struct inode *dirty; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; @@ -237,7 +237,7 @@ static int omfs_dir_is_empty(struct inode *inode) static int omfs_remove(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int ret; @@ -373,8 +373,8 @@ static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx, static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - struct inode *new_inode = new_dentry->d_inode; - struct inode *old_inode = old_dentry->d_inode; + struct inode *new_inode = d_inode(new_dentry); + struct inode *old_inode = d_inode(old_dentry); int err; if (new_inode) { diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 902e885..d9e26cf 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -337,8 +337,6 @@ static sector_t omfs_bmap(struct address_space *mapping, sector_t block) const struct file_operations omfs_file_operations = { .llseek = 
generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, @@ -348,7 +346,7 @@ const struct file_operations omfs_file_operations = { static int omfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; error = inode_change_ok(inode, attr); @@ -231,8 +231,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) return -EINVAL; /* Return error if mode is not supported */ - if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | - FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) + if (mode & ~FALLOC_FL_SUPPORTED_MASK) return -EOPNOTSUPP; /* Punch hole and zero range are mutually exclusive */ @@ -250,6 +249,11 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) (mode & ~FALLOC_FL_COLLAPSE_RANGE)) return -EINVAL; + /* Insert range should only be used exclusively. */ + if ((mode & FALLOC_FL_INSERT_RANGE) && + (mode & ~FALLOC_FL_INSERT_RANGE)) + return -EINVAL; + if (!(file->f_mode & FMODE_WRITE)) return -EBADF; @@ -570,6 +574,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group) uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); +retry_deleg: newattrs.ia_valid = ATTR_CTIME; if (user != (uid_t) -1) { if (!uid_valid(uid)) @@ -586,7 +591,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group) if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; -retry_deleg: mutex_lock(&inode->i_mutex); error = security_path_chown(path, uid, gid); if (!error) @@ -734,10 +738,10 @@ static int do_dentry_open(struct file *f, if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(inode); if ((f->f_mode & FMODE_READ) && - likely(f->f_op->read || f->f_op->aio_read || f->f_op->read_iter)) + likely(f->f_op->read || f->f_op->read_iter)) f->f_mode |= FMODE_CAN_READ; if ((f->f_mode & FMODE_WRITE) && - likely(f->f_op->write || f->f_op->aio_write || f->f_op->write_iter)) + likely(f->f_op->write || f->f_op->write_iter)) f->f_mode |= FMODE_CAN_WRITE; f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); @@ -988,9 +992,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt, return ERR_PTR(err); if (flags & O_CREAT) return ERR_PTR(-EINVAL); - if (!filename && (flags & O_DIRECTORY)) - if (!dentry->d_inode->i_op->lookup) - return ERR_PTR(-ENOTDIR); return do_file_open_root(dentry, mnt, filename, &op); } EXPORT_SYMBOL(file_open_root); @@ -21,7 +21,6 @@ #include <linux/audit.h> #include <linux/syscalls.h> #include <linux/fcntl.h> -#include <linux/aio.h> #include <asm/uaccess.h> #include <asm/ioctls.h> @@ -628,7 +627,7 @@ static struct vfsmount *pipe_mnt __read_mostly; static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]", - dentry->d_inode->i_ino); + d_inode(dentry)->i_ino); } static const struct dentry_operations pipefs_dentry_operations = { @@ -947,9 +946,7 @@ err: const struct file_operations pipefifo_fops = { .open = fifo_open, .llseek = no_llseek, - .read = new_sync_read, .read_iter = pipe_read, - .write = new_sync_write, .write_iter = pipe_write, .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, @@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt) } /* + * Clear MNT_LOCKED when it can be shown to be safe. 
+ * + * mount_lock lock must be held for write + */ +void propagate_mount_unlock(struct mount *mnt) +{ + struct mount *parent = mnt->mnt_parent; + struct mount *m, *child; + + BUG_ON(parent == mnt); + + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { + child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); + if (child) + child->mnt.mnt_flags &= ~MNT_LOCKED; + } +} + +/* + * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted. + */ +static void mark_umount_candidates(struct mount *mnt) +{ + struct mount *parent = mnt->mnt_parent; + struct mount *m; + + BUG_ON(parent == mnt); + + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { + struct mount *child = __lookup_mnt_last(&m->mnt, + mnt->mnt_mountpoint); + if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) { + SET_MNT_MARK(child); + } + } +} + +/* * NOTE: unmounting 'mnt' naturally propagates to all other mounts its * parent propagates to. */ @@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt) struct mount *child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); /* - * umount the child only if the child has no - * other children + * umount the child only if the child has no children + * and the child is marked safe to unmount. */ - if (child && list_empty(&child->mnt_mounts)) { + if (!child || !IS_MNT_MARKED(child)) + continue; + CLEAR_MNT_MARK(child); + if (list_empty(&child->mnt_mounts)) { list_del_init(&child->mnt_child); - hlist_del_init_rcu(&child->mnt_hash); - hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash); + child->mnt.mnt_flags |= MNT_UMOUNT; + list_move_tail(&child->mnt_list, &mnt->mnt_list); } } } @@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt) * * vfsmount lock must be held for write */ -int propagate_umount(struct hlist_head *list) +int propagate_umount(struct list_head *list) { struct mount *mnt; - hlist_for_each_entry(mnt, list, mnt_hash) + list_for_each_entry_reverse(mnt, list, mnt_list) + mark_umount_candidates(mnt); + + list_for_each_entry(mnt, list, mnt_list) __propagate_umount(mnt); return 0; } @@ -19,6 +19,9 @@ #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED) #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) +#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) +#define IS_MNT_LOCKED_AND_LAZY(m) \ + (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED) #define CL_EXPIRE 0x01 #define CL_SLAVE 0x02 @@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt) void change_mnt_propagation(struct mount *, int); int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, struct hlist_head *); -int propagate_umount(struct hlist_head *); +int propagate_umount(struct list_head *); int propagate_mount_busy(struct mount *, int); +void propagate_mount_unlock(struct mount *); void mnt_release_group_id(struct mount *); int get_dominating_id(struct mount *mnt, const struct path *root); unsigned int mnt_get_count(struct mount *mnt); void mnt_set_mountpoint(struct mount *, struct mountpoint *, struct mount *); -void umount_tree(struct mount *, int); struct mount *copy_tree(struct mount *, struct dentry *, int); bool is_path_reachable(struct mount *, struct dentry *, const struct path *root); diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 3a48bb7..84bb65b8 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -774,12 +774,12 @@ posix_acl_xattr_get(struct dentry *dentry, 
const char *name, struct posix_acl *acl; int error; - if (!IS_POSIXACL(dentry->d_inode)) + if (!IS_POSIXACL(d_backing_inode(dentry))) return -EOPNOTSUPP; if (d_is_symlink(dentry)) return -EOPNOTSUPP; - acl = get_acl(dentry->d_inode, type); + acl = get_acl(d_backing_inode(dentry), type); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) @@ -795,7 +795,7 @@ static int posix_acl_xattr_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_backing_inode(dentry); struct posix_acl *acl = NULL; int ret; @@ -834,7 +834,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size, const char *xname; size_t size; - if (!IS_POSIXACL(dentry->d_inode)) + if (!IS_POSIXACL(d_backing_inode(dentry))) return -EOPNOTSUPP; if (d_is_symlink(dentry)) return -EOPNOTSUPP; diff --git a/fs/proc/array.c b/fs/proc/array.c index 1295a00..fd02a9e 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -99,8 +99,8 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) buf = m->buf + m->count; /* Ignore error for now */ - string_escape_str(tcomm, &buf, m->size - m->count, - ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\"); + buf += string_escape_str(tcomm, buf, m->size - m->count, + ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\"); m->count = buf - m->buf; seq_putc(m, '\n'); @@ -188,6 +188,24 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, from_kgid_munged(user_ns, GROUP_AT(group_info, g))); put_cred(cred); +#ifdef CONFIG_PID_NS + seq_puts(m, "\nNStgid:"); + for (g = ns->level; g <= pid->level; g++) + seq_printf(m, "\t%d", + task_tgid_nr_ns(p, pid->numbers[g].ns)); + seq_puts(m, "\nNSpid:"); + for (g = ns->level; g <= pid->level; g++) + seq_printf(m, "\t%d", + task_pid_nr_ns(p, pid->numbers[g].ns)); + seq_puts(m, "\nNSpgid:"); + for (g = ns->level; g <= pid->level; g++) + seq_printf(m, "\t%d", + task_pgrp_nr_ns(p, pid->numbers[g].ns)); + seq_puts(m, "\nNSsid:"); + for (g = ns->level; g <= pid->level; g++) + seq_printf(m, "\t%d", + task_session_nr_ns(p, pid->numbers[g].ns)); +#endif seq_putc(m, '\n'); } @@ -614,7 +632,9 @@ static int children_seq_show(struct seq_file *seq, void *v) pid_t pid; pid = pid_nr_ns(v, inode->i_sb->s_fs_info); - return seq_printf(seq, "%d ", pid); + seq_printf(seq, "%d ", pid); + + return 0; } static void *children_seq_start(struct seq_file *seq, loff_t *pos) diff --git a/fs/proc/base.c b/fs/proc/base.c index 3f3d7ae..093ca14 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -169,7 +169,7 @@ static int get_task_root(struct task_struct *task, struct path *root) static int proc_cwd_link(struct dentry *dentry, struct path *path) { - struct task_struct *task = get_proc_task(dentry->d_inode); + struct task_struct *task = get_proc_task(d_inode(dentry)); int result = -ENOENT; if (task) { @@ -186,7 +186,7 @@ static int proc_cwd_link(struct dentry *dentry, struct path *path) static int proc_root_link(struct dentry *dentry, struct path *path) { - struct task_struct *task = get_proc_task(dentry->d_inode); + struct task_struct *task = get_proc_task(d_inode(dentry)); int result = -ENOENT; if (task) { @@ -238,13 +238,15 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, wchan = get_wchan(task); - if (lookup_symbol_name(wchan, symname) < 0) + if (lookup_symbol_name(wchan, symname) < 0) { if (!ptrace_may_access(task, PTRACE_MODE_READ)) return 0; - else - return seq_printf(m, "%lu", wchan); - else - return seq_printf(m, "%s", 
symname); + seq_printf(m, "%lu", wchan); + } else { + seq_printf(m, "%s", symname); + } + + return 0; } #endif /* CONFIG_KALLSYMS */ @@ -309,10 +311,12 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { - return seq_printf(m, "%llu %llu %lu\n", - (unsigned long long)task->se.sum_exec_runtime, - (unsigned long long)task->sched_info.run_delay, - task->sched_info.pcount); + seq_printf(m, "%llu %llu %lu\n", + (unsigned long long)task->se.sum_exec_runtime, + (unsigned long long)task->sched_info.run_delay, + task->sched_info.pcount); + + return 0; } #endif @@ -387,7 +391,9 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, points = oom_badness(task, NULL, NULL, totalpages) * 1000 / totalpages; read_unlock(&tasklist_lock); - return seq_printf(m, "%lu\n", points); + seq_printf(m, "%lu\n", points); + + return 0; } struct limit_names { @@ -432,15 +438,15 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns, * print the file header */ seq_printf(m, "%-25s %-20s %-20s %-10s\n", - "Limit", "Soft Limit", "Hard Limit", "Units"); + "Limit", "Soft Limit", "Hard Limit", "Units"); for (i = 0; i < RLIM_NLIMITS; i++) { if (rlim[i].rlim_cur == RLIM_INFINITY) seq_printf(m, "%-25s %-20s ", - lnames[i].name, "unlimited"); + lnames[i].name, "unlimited"); else seq_printf(m, "%-25s %-20lu ", - lnames[i].name, rlim[i].rlim_cur); + lnames[i].name, rlim[i].rlim_cur); if (rlim[i].rlim_max == RLIM_INFINITY) seq_printf(m, "%-20s ", "unlimited"); @@ -462,7 +468,9 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, { long nr; unsigned long args[6], sp, pc; - int res = lock_trace(task); + int res; + + res = lock_trace(task); if (res) return res; @@ -477,7 +485,8 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, args[0], args[1], args[2], args[3], args[4], args[5], sp, pc); unlock_trace(task); - return res; + + return 0; } #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */ @@ -505,7 +514,7 @@ static int proc_fd_access_allowed(struct inode *inode) int proc_setattr(struct dentry *dentry, struct iattr *attr) { int error; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); if (attr->ia_valid & ATTR_MODE) return -EPERM; @@ -1353,7 +1362,7 @@ static int proc_exe_link(struct dentry *dentry, struct path *exe_path) struct mm_struct *mm; struct file *exe_file; - task = get_proc_task(dentry->d_inode); + task = get_proc_task(d_inode(dentry)); if (!task) return -ENOENT; mm = get_task_mm(task); @@ -1373,7 +1382,7 @@ static int proc_exe_link(struct dentry *dentry, struct path *exe_path) static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct path path; int error = -EACCES; @@ -1418,7 +1427,7 @@ static int do_proc_readlink(struct path *path, char __user *buffer, int buflen) static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen) { int error = -EACCES; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct path path; /* Are we allowed to snoop on the tasks file descriptors? 
*/ @@ -1488,7 +1497,7 @@ out_unlock: int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct task_struct *task; const struct cred *cred; struct pid_namespace *pid = dentry->d_sb->s_fs_info; @@ -1545,7 +1554,7 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - inode = dentry->d_inode; + inode = d_inode(dentry); task = get_proc_task(inode); if (task) { @@ -1579,7 +1588,7 @@ int pid_delete_dentry(const struct dentry *dentry) * If so, then don't put the dentry on the lru list, * kill it immediately. */ - return proc_inode_is_dead(dentry->d_inode); + return proc_inode_is_dead(d_inode(dentry)); } const struct dentry_operations pid_dentry_operations = @@ -1617,12 +1626,12 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_alloc(dir, &qname); if (!child) goto end_instantiate; - if (instantiate(dir->d_inode, child, task, ptr) < 0) { + if (instantiate(d_inode(dir), child, task, ptr) < 0) { dput(child); goto end_instantiate; } } - inode = child->d_inode; + inode = d_inode(child); ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); @@ -1665,7 +1674,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) goto out_notask; } - inode = dentry->d_inode; + inode = d_inode(dentry); task = get_proc_task(inode); if (!task) goto out_notask; @@ -1718,7 +1727,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) int rc; rc = -ENOENT; - task = get_proc_task(dentry->d_inode); + task = get_proc_task(d_inode(dentry)); if (!task) goto out; @@ -2002,12 +2011,13 @@ static int show_timer(struct seq_file *m, void *v) notify = timer->it_sigev_notify; seq_printf(m, "ID: %d\n", timer->it_id); - seq_printf(m, "signal: %d/%p\n", timer->sigq->info.si_signo, - timer->sigq->info.si_value.sival_ptr); + seq_printf(m, "signal: %d/%p\n", + timer->sigq->info.si_signo, + timer->sigq->info.si_value.sival_ptr); seq_printf(m, "notify: %s/%s.%d\n", - nstr[notify & ~SIGEV_THREAD_ID], - (notify & SIGEV_THREAD_ID) ? "tid" : "pid", - pid_nr_ns(timer->it_pid, tp->ns)); + nstr[notify & ~SIGEV_THREAD_ID], + (notify & SIGEV_THREAD_ID) ? 
"tid" : "pid", + pid_nr_ns(timer->it_pid, tp->ns)); seq_printf(m, "ClockID: %d\n", timer->it_clock); return 0; @@ -2352,21 +2362,23 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh unlock_task_sighand(task, &flags); } - result = seq_printf(m, - "rchar: %llu\n" - "wchar: %llu\n" - "syscr: %llu\n" - "syscw: %llu\n" - "read_bytes: %llu\n" - "write_bytes: %llu\n" - "cancelled_write_bytes: %llu\n", - (unsigned long long)acct.rchar, - (unsigned long long)acct.wchar, - (unsigned long long)acct.syscr, - (unsigned long long)acct.syscw, - (unsigned long long)acct.read_bytes, - (unsigned long long)acct.write_bytes, - (unsigned long long)acct.cancelled_write_bytes); + seq_printf(m, + "rchar: %llu\n" + "wchar: %llu\n" + "syscr: %llu\n" + "syscw: %llu\n" + "read_bytes: %llu\n" + "write_bytes: %llu\n" + "cancelled_write_bytes: %llu\n", + (unsigned long long)acct.rchar, + (unsigned long long)acct.wchar, + (unsigned long long)acct.syscr, + (unsigned long long)acct.syscw, + (unsigned long long)acct.read_bytes, + (unsigned long long)acct.write_bytes, + (unsigned long long)acct.cancelled_write_bytes); + result = 0; + out_unlock: mutex_unlock(&task->signal->cred_guard_mutex); return result; @@ -2851,13 +2863,13 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) return 0; if (pos == TGID_OFFSET - 2) { - struct inode *inode = ns->proc_self->d_inode; + struct inode *inode = d_inode(ns->proc_self); if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK)) return 0; ctx->pos = pos = pos + 1; } if (pos == TGID_OFFSET - 1) { - struct inode *inode = ns->proc_thread_self->d_inode; + struct inode *inode = d_inode(ns->proc_thread_self); if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK)) return 0; ctx->pos = pos = pos + 1; @@ -3176,7 +3188,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct task_struct *p = get_proc_task(inode); generic_fillattr(inode, stat); diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 8e5ad83..6e5fcd0 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -8,6 +8,7 @@ #include <linux/security.h> #include <linux/file.h> #include <linux/seq_file.h> +#include <linux/fs.h> #include <linux/proc_fs.h> @@ -48,17 +49,23 @@ static int seq_show(struct seq_file *m, void *v) put_files_struct(files); } - if (!ret) { - seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\n", - (long long)file->f_pos, f_flags, - real_mount(file->f_path.mnt)->mnt_id); - if (file->f_op->show_fdinfo) - file->f_op->show_fdinfo(m, file); - ret = seq_has_overflowed(m); - fput(file); - } + if (ret) + return ret; - return ret; + seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\n", + (long long)file->f_pos, f_flags, + real_mount(file->f_path.mnt)->mnt_id); + + show_fd_locks(m, file, files); + if (seq_has_overflowed(m)) + goto out; + + if (file->f_op->show_fdinfo) + file->f_op->show_fdinfo(m, file); + +out: + fput(file); + return 0; } static int seq_fdinfo_open(struct inode *inode, struct file *file) @@ -84,7 +91,7 @@ static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - inode = dentry->d_inode; + inode = d_inode(dentry); task = get_proc_task(inode); fd = proc_fd(inode); @@ -144,14 +151,14 @@ static int proc_fd_link(struct dentry *dentry, struct path *path) struct task_struct *task; int ret = -ENOENT; - task = 
get_proc_task(dentry->d_inode); + task = get_proc_task(d_inode(dentry)); if (task) { files = get_files_struct(task); put_task_struct(task); } if (files) { - int fd = proc_fd(dentry->d_inode); + int fd = proc_fd(d_inode(dentry)); struct file *fd_file; spin_lock(&files->file_lock); diff --git a/fs/proc/generic.c b/fs/proc/generic.c index be65b20..df6327a 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -101,7 +101,7 @@ static bool pde_subdir_insert(struct proc_dir_entry *dir, static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct proc_dir_entry *de = PDE(inode); int error; @@ -120,7 +120,7 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct proc_dir_entry *de = PDE(inode); if (de && de->nlink) set_nlink(inode, de->nlink); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 7697b66..8272aab 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -396,7 +396,7 @@ static const struct file_operations proc_reg_file_ops_no_compat = { static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct proc_dir_entry *pde = PDE(dentry->d_inode); + struct proc_dir_entry *pde = PDE(d_inode(dentry)); if (unlikely(!use_pde(pde))) return ERR_PTR(-EINVAL); nd_set_link(nd, pde->data); diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index c9eac45..e512642 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -32,7 +32,7 @@ static const struct proc_ns_operations *ns_entries[] = { static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops; struct task_struct *task; struct path ns_path; @@ -53,7 +53,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd) static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops; struct task_struct *task; char name[50]; diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 1bde894..350984a 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -142,7 +142,7 @@ static struct dentry *proc_tgid_net_lookup(struct inode *dir, static int proc_tgid_net_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct net *net; net = get_proc_task_net(inode); diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index f92d5dd..fea2561 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -604,7 +604,7 @@ static bool proc_sys_fill_cache(struct file *file, return false; } } - inode = child->d_inode; + inode = d_inode(child); ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); @@ -710,7 +710,7 @@ static int proc_sys_permission(struct inode *inode, int mask) static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) @@ -727,7 +727,7 @@ static int proc_sys_setattr(struct dentry 
*dentry, struct iattr *attr) static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; @@ -773,12 +773,12 @@ static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; - return !PROC_I(dentry->d_inode)->sysctl->unregistering; + return !PROC_I(d_inode(dentry))->sysctl->unregistering; } static int proc_sys_delete(const struct dentry *dentry) { - return !!PROC_I(dentry->d_inode)->sysctl->unregistering; + return !!PROC_I(d_inode(dentry))->sysctl->unregistering; } static int sysctl_is_seen(struct ctl_table_header *p) @@ -805,7 +805,7 @@ static int proc_sys_compare(const struct dentry *parent, const struct dentry *de /* Although proc doesn't have negative dentries, rcu-walk means * that inode here can be NULL */ /* AV: can it, indeed? */ - inode = ACCESS_ONCE(dentry->d_inode); + inode = d_inode_rcu(dentry); if (!inode) return 1; if (name->len != len) diff --git a/fs/proc/root.c b/fs/proc/root.c index e74ac9f..b7fa4bf 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -195,7 +195,7 @@ void __init proc_root_init(void) static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat ) { - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(d_inode(dentry), stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } diff --git a/fs/proc/self.c b/fs/proc/self.c index 4348bb8..6195b4a 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -46,7 +46,7 @@ static unsigned self_inum; int proc_setup_self(struct super_block *s) { - struct inode *root_inode = s->s_root->d_inode; + struct inode *root_inode = d_inode(s->s_root); struct pid_namespace *ns = s->s_fs_info; struct dentry *self; diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index 59075b5..a837199 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c @@ -47,7 +47,7 @@ static unsigned thread_self_inum; int proc_setup_thread_self(struct super_block *s) { - struct inode *root_inode = s->s_root->d_inode; + struct inode *root_inode = d_inode(s->s_root); struct pid_namespace *ns = s->s_fs_info; struct dentry *thread_self; diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index b32ce53..dc43b5f 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -190,7 +190,7 @@ static const struct file_operations pstore_file_operations = { */ static int pstore_unlink(struct inode *dir, struct dentry *dentry) { - struct pstore_private *p = dentry->d_inode->i_private; + struct pstore_private *p = d_inode(dentry)->i_private; int err; err = pstore_check_syslog_permissions(p); @@ -199,7 +199,7 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry) if (p->psi->erase) p->psi->erase(p->type, p->id, p->count, - dentry->d_inode->i_ctime, p->psi); + d_inode(dentry)->i_ctime, p->psi); else return -EPERM; @@ -364,6 +364,9 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count, case PSTORE_TYPE_PMSG: scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id); break; + case PSTORE_TYPE_PPC_OPAL: + sprintf(name, "powerpc-opal-%s-%lld", psname, id); + break; case PSTORE_TYPE_UNKNOWN: scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id); break; @@ -373,7 +376,7 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count, break; } - mutex_lock(&root->d_inode->i_mutex); + 
mutex_lock(&d_inode(root)->i_mutex); dentry = d_alloc_name(root, name); if (!dentry) @@ -393,12 +396,12 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count, list_add(&private->list, &allpstore); spin_unlock_irqrestore(&allpstore_lock, flags); - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); return 0; fail_lockedalloc: - mutex_unlock(&root->d_inode->i_mutex); + mutex_unlock(&d_inode(root)->i_mutex); kfree(private); fail_alloc: iput(inode); diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 39d1373..44a549b 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -539,6 +539,9 @@ static int ramoops_probe(struct platform_device *pdev) mem_address = pdata->mem_address; record_size = pdata->record_size; dump_oops = pdata->dump_oops; + ramoops_console_size = pdata->console_size; + ramoops_pmsg_size = pdata->pmsg_size; + ramoops_ftrace_size = pdata->ftrace_size; pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n", cxt->size, (unsigned long long)cxt->phys_addr, diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c index 44e7392..32d2e1a 100644 --- a/fs/qnx6/inode.c +++ b/fs/qnx6/inode.c @@ -182,7 +182,7 @@ static const char *qnx6_checkroot(struct super_block *s) static char match_root[2][3] = {".\0\0", "..\0"}; int i, error = 0; struct qnx6_dir_entry *dir_entry; - struct inode *root = s->s_root->d_inode; + struct inode *root = d_inode(s->s_root); struct address_space *mapping = root->i_mapping; struct page *page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 0ccd4ba..20d1f74 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -900,14 +900,17 @@ static inline struct dquot **i_dquot(struct inode *inode) static int dqinit_needed(struct inode *inode, int type) { + struct dquot * const *dquots; int cnt; if (IS_NOQUOTA(inode)) return 0; + + dquots = i_dquot(inode); if (type != -1) - return !i_dquot(inode)[type]; + return !dquots[type]; for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (!i_dquot(inode)[cnt]) + if (!dquots[cnt]) return 1; return 0; } @@ -970,12 +973,13 @@ static void add_dquot_ref(struct super_block *sb, int type) static void remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head) { - struct dquot *dquot = i_dquot(inode)[type]; + struct dquot **dquots = i_dquot(inode); + struct dquot *dquot = dquots[type]; - i_dquot(inode)[type] = NULL; if (!dquot) return; + dquots[type] = NULL; if (list_empty(&dquot->dq_free)) { /* * The inode still has reference to dquot so it can't be in the @@ -1159,8 +1163,8 @@ static int need_print_warning(struct dquot_warn *warn) return uid_eq(current_fsuid(), warn->w_dq_id.uid); case GRPQUOTA: return in_group_p(warn->w_dq_id.gid); - case PRJQUOTA: /* Never taken... Just make gcc happy */ - return 0; + case PRJQUOTA: + return 1; } return 0; } @@ -1389,16 +1393,21 @@ static int dquot_active(const struct inode *inode) static void __dquot_initialize(struct inode *inode, int type) { int cnt, init_needed = 0; - struct dquot *got[MAXQUOTAS]; + struct dquot **dquots, *got[MAXQUOTAS]; struct super_block *sb = inode->i_sb; qsize_t rsv; if (!dquot_active(inode)) return; + dquots = i_dquot(inode); + /* First get references to structures we might need. 
*/ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { struct kqid qid; + kprojid_t projid; + int rc; + got[cnt] = NULL; if (type != -1 && cnt != type) continue; @@ -1407,8 +1416,12 @@ static void __dquot_initialize(struct inode *inode, int type) * we check it without locking here to avoid unnecessary * dqget()/dqput() calls. */ - if (i_dquot(inode)[cnt]) + if (dquots[cnt]) + continue; + + if (!sb_has_quota_active(sb, cnt)) continue; + init_needed = 1; switch (cnt) { @@ -1418,6 +1431,12 @@ static void __dquot_initialize(struct inode *inode, int type) case GRPQUOTA: qid = make_kqid_gid(inode->i_gid); break; + case PRJQUOTA: + rc = inode->i_sb->dq_op->get_projid(inode, &projid); + if (rc) + continue; + qid = make_kqid_projid(projid); + break; } got[cnt] = dqget(sb, qid); } @@ -1438,8 +1457,8 @@ static void __dquot_initialize(struct inode *inode, int type) /* We could race with quotaon or dqget() could have failed */ if (!got[cnt]) continue; - if (!i_dquot(inode)[cnt]) { - i_dquot(inode)[cnt] = got[cnt]; + if (!dquots[cnt]) { + dquots[cnt] = got[cnt]; got[cnt] = NULL; /* * Make quota reservation system happy if someone @@ -1447,7 +1466,7 @@ static void __dquot_initialize(struct inode *inode, int type) */ rsv = inode_get_rsv_space(inode); if (unlikely(rsv)) - dquot_resv_space(i_dquot(inode)[cnt], rsv); + dquot_resv_space(dquots[cnt], rsv); } } out_err: @@ -1473,12 +1492,13 @@ EXPORT_SYMBOL(dquot_initialize); static void __dquot_drop(struct inode *inode) { int cnt; + struct dquot **dquots = i_dquot(inode); struct dquot *put[MAXQUOTAS]; spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - put[cnt] = i_dquot(inode)[cnt]; - i_dquot(inode)[cnt] = NULL; + put[cnt] = dquots[cnt]; + dquots[cnt] = NULL; } spin_unlock(&dq_data_lock); dqput_all(put); @@ -1486,6 +1506,7 @@ static void __dquot_drop(struct inode *inode) void dquot_drop(struct inode *inode) { + struct dquot * const *dquots; int cnt; if (IS_NOQUOTA(inode)) @@ -1498,8 +1519,9 @@ void dquot_drop(struct inode *inode) * must assure that nobody can come after the DQUOT_DROP and * add quota pointers back anyway. 
*/ + dquots = i_dquot(inode); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (i_dquot(inode)[cnt]) + if (dquots[cnt]) break; } @@ -1600,8 +1622,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) { int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; - struct dquot **dquots = i_dquot(inode); int reserve = flags & DQUOT_SPACE_RESERVE; + struct dquot **dquots; if (!dquot_active(inode)) { inode_incr_space(inode, number, reserve); @@ -1611,6 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) for (cnt = 0; cnt < MAXQUOTAS; cnt++) warn[cnt].w_type = QUOTA_NL_NOWARN; + dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1652,13 +1675,14 @@ int dquot_alloc_inode(struct inode *inode) { int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; - struct dquot * const *dquots = i_dquot(inode); + struct dquot * const *dquots; if (!dquot_active(inode)) return 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) warn[cnt].w_type = QUOTA_NL_NOWARN; + dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1690,6 +1714,7 @@ EXPORT_SYMBOL(dquot_alloc_inode); */ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) { + struct dquot **dquots; int cnt, index; if (!dquot_active(inode)) { @@ -1697,18 +1722,18 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) return 0; } + dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (i_dquot(inode)[cnt]) - dquot_claim_reserved_space(i_dquot(inode)[cnt], - number); + if (dquots[cnt]) + dquot_claim_reserved_space(dquots[cnt], number); } /* Update inode bytes */ inode_claim_rsv_space(inode, number); spin_unlock(&dq_data_lock); - mark_all_dquot_dirty(i_dquot(inode)); + mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); return 0; } @@ -1719,6 +1744,7 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty); */ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) { + struct dquot **dquots; int cnt, index; if (!dquot_active(inode)) { @@ -1726,18 +1752,18 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) return; } + dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (i_dquot(inode)[cnt]) - dquot_reclaim_reserved_space(i_dquot(inode)[cnt], - number); + if (dquots[cnt]) + dquot_reclaim_reserved_space(dquots[cnt], number); } /* Update inode bytes */ inode_reclaim_rsv_space(inode, number); spin_unlock(&dq_data_lock); - mark_all_dquot_dirty(i_dquot(inode)); + mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); return; } @@ -1750,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) { unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; - struct dquot **dquots = i_dquot(inode); + struct dquot **dquots; int reserve = flags & DQUOT_SPACE_RESERVE, index; if (!dquot_active(inode)) { @@ -1758,6 +1784,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) return; } + dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1793,12 +1820,13 @@ void dquot_free_inode(struct inode *inode) { unsigned int cnt; struct 
dquot_warn warn[MAXQUOTAS]; - struct dquot * const *dquots = i_dquot(inode); + struct dquot * const *dquots; int index; if (!dquot_active(inode)) return; + dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -2161,7 +2189,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, error = -EROFS; goto out_fmt; } - if (!sb->s_op->quota_write || !sb->s_op->quota_read) { + if (!sb->s_op->quota_write || !sb->s_op->quota_read || + (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) { error = -EINVAL; goto out_fmt; } @@ -2299,7 +2328,7 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id, if (path->dentry->d_sb != sb) error = -EXDEV; else - error = vfs_load_quota_inode(path->dentry->d_inode, type, + error = vfs_load_quota_inode(d_inode(path->dentry), type, format_id, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); return error; @@ -2363,20 +2392,20 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name, struct dentry *dentry; int error; - mutex_lock(&sb->s_root->d_inode->i_mutex); + mutex_lock(&d_inode(sb->s_root)->i_mutex); dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); - mutex_unlock(&sb->s_root->d_inode->i_mutex); + mutex_unlock(&d_inode(sb->s_root)->i_mutex); if (IS_ERR(dentry)) return PTR_ERR(dentry); - if (!dentry->d_inode) { + if (d_really_is_negative(dentry)) { error = -ENOENT; goto out; } error = security_quota_on(dentry); if (!error) - error = vfs_load_quota_inode(dentry->d_inode, type, format_id, + error = vfs_load_quota_inode(d_inode(dentry), type, format_id, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); out: @@ -2614,55 +2643,73 @@ out: EXPORT_SYMBOL(dquot_set_dqblk); /* Generic routine for getting common part of quota file information */ -int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +int dquot_get_state(struct super_block *sb, struct qc_state *state) { struct mem_dqinfo *mi; + struct qc_type_state *tstate; + struct quota_info *dqopt = sb_dqopt(sb); + int type; mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!sb_has_quota_active(sb, type)) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return -ESRCH; + memset(state, 0, sizeof(*state)); + for (type = 0; type < MAXQUOTAS; type++) { + if (!sb_has_quota_active(sb, type)) + continue; + tstate = state->s_state + type; + mi = sb_dqopt(sb)->info + type; + tstate->flags = QCI_ACCT_ENABLED; + spin_lock(&dq_data_lock); + if (mi->dqi_flags & DQF_SYS_FILE) + tstate->flags |= QCI_SYSFILE; + if (mi->dqi_flags & DQF_ROOT_SQUASH) + tstate->flags |= QCI_ROOT_SQUASH; + if (sb_has_quota_limits_enabled(sb, type)) + tstate->flags |= QCI_LIMITS_ENFORCED; + tstate->spc_timelimit = mi->dqi_bgrace; + tstate->ino_timelimit = mi->dqi_igrace; + tstate->ino = dqopt->files[type]->i_ino; + tstate->blocks = dqopt->files[type]->i_blocks; + tstate->nextents = 1; /* We don't know... 
*/ + spin_unlock(&dq_data_lock); } - mi = sb_dqopt(sb)->info + type; - spin_lock(&dq_data_lock); - ii->dqi_bgrace = mi->dqi_bgrace; - ii->dqi_igrace = mi->dqi_igrace; - ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK; - ii->dqi_valid = IIF_ALL; - spin_unlock(&dq_data_lock); mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } -EXPORT_SYMBOL(dquot_get_dqinfo); +EXPORT_SYMBOL(dquot_get_state); /* Generic routine for setting common part of quota file information */ -int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii) { struct mem_dqinfo *mi; int err = 0; + if ((ii->i_fieldmask & QC_WARNS_MASK) || + (ii->i_fieldmask & QC_RT_SPC_TIMER)) + return -EINVAL; mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_active(sb, type)) { err = -ESRCH; goto out; } mi = sb_dqopt(sb)->info + type; - if (ii->dqi_valid & IIF_FLAGS) { - if (ii->dqi_flags & ~DQF_SETINFO_MASK || - (ii->dqi_flags & DQF_ROOT_SQUASH && + if (ii->i_fieldmask & QC_FLAGS) { + if ((ii->i_flags & QCI_ROOT_SQUASH && mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) { err = -EINVAL; goto out; } } spin_lock(&dq_data_lock); - if (ii->dqi_valid & IIF_BGRACE) - mi->dqi_bgrace = ii->dqi_bgrace; - if (ii->dqi_valid & IIF_IGRACE) - mi->dqi_igrace = ii->dqi_igrace; - if (ii->dqi_valid & IIF_FLAGS) - mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) | - (ii->dqi_flags & DQF_SETINFO_MASK); + if (ii->i_fieldmask & QC_SPC_TIMER) + mi->dqi_bgrace = ii->i_spc_timelimit; + if (ii->i_fieldmask & QC_INO_TIMER) + mi->dqi_igrace = ii->i_ino_timelimit; + if (ii->i_fieldmask & QC_FLAGS) { + if (ii->i_flags & QCI_ROOT_SQUASH) + mi->dqi_flags |= DQF_ROOT_SQUASH; + else + mi->dqi_flags &= ~DQF_ROOT_SQUASH; + } spin_unlock(&dq_data_lock); mark_info_dirty(sb, type); /* Force write to disk */ @@ -2677,7 +2724,7 @@ const struct quotactl_ops dquot_quotactl_ops = { .quota_on = dquot_quota_on, .quota_off = dquot_quota_off, .quota_sync = dquot_quota_sync, - .get_info = dquot_get_dqinfo, + .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk @@ -2688,7 +2735,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = { .quota_enable = dquot_quota_enable, .quota_disable = dquot_quota_disable, .quota_sync = dquot_quota_sync, - .get_info = dquot_get_dqinfo, + .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk diff --git a/fs/quota/quota.c b/fs/quota/quota.c index d14a799..86ded73 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -118,13 +118,30 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr) static int quota_getinfo(struct super_block *sb, int type, void __user *addr) { - struct if_dqinfo info; + struct qc_state state; + struct qc_type_state *tstate; + struct if_dqinfo uinfo; int ret; - if (!sb->s_qcop->get_info) + /* This checks whether qc_state has enough entries... 
*/ + BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS); + if (!sb->s_qcop->get_state) return -ENOSYS; - ret = sb->s_qcop->get_info(sb, type, &info); - if (!ret && copy_to_user(addr, &info, sizeof(info))) + ret = sb->s_qcop->get_state(sb, &state); + if (ret) + return ret; + tstate = state.s_state + type; + if (!(tstate->flags & QCI_ACCT_ENABLED)) + return -ESRCH; + memset(&uinfo, 0, sizeof(uinfo)); + uinfo.dqi_bgrace = tstate->spc_timelimit; + uinfo.dqi_igrace = tstate->ino_timelimit; + if (tstate->flags & QCI_SYSFILE) + uinfo.dqi_flags |= DQF_SYS_FILE; + if (tstate->flags & QCI_ROOT_SQUASH) + uinfo.dqi_flags |= DQF_ROOT_SQUASH; + uinfo.dqi_valid = IIF_ALL; + if (!ret && copy_to_user(addr, &uinfo, sizeof(uinfo))) return -EFAULT; return ret; } @@ -132,12 +149,31 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr) static int quota_setinfo(struct super_block *sb, int type, void __user *addr) { struct if_dqinfo info; + struct qc_info qinfo; if (copy_from_user(&info, addr, sizeof(info))) return -EFAULT; if (!sb->s_qcop->set_info) return -ENOSYS; - return sb->s_qcop->set_info(sb, type, &info); + if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE)) + return -EINVAL; + memset(&qinfo, 0, sizeof(qinfo)); + if (info.dqi_valid & IIF_FLAGS) { + if (info.dqi_flags & ~DQF_SETINFO_MASK) + return -EINVAL; + if (info.dqi_flags & DQF_ROOT_SQUASH) + qinfo.i_flags |= QCI_ROOT_SQUASH; + qinfo.i_fieldmask |= QC_FLAGS; + } + if (info.dqi_valid & IIF_BGRACE) { + qinfo.i_spc_timelimit = info.dqi_bgrace; + qinfo.i_fieldmask |= QC_SPC_TIMER; + } + if (info.dqi_valid & IIF_IGRACE) { + qinfo.i_ino_timelimit = info.dqi_igrace; + qinfo.i_fieldmask |= QC_INO_TIMER; + } + return sb->s_qcop->set_info(sb, type, &qinfo); } static inline qsize_t qbtos(qsize_t blocks) @@ -252,25 +288,149 @@ static int quota_disable(struct super_block *sb, void __user *addr) return sb->s_qcop->quota_disable(sb, flags); } +static int quota_state_to_flags(struct qc_state *state) +{ + int flags = 0; + + if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) + flags |= FS_QUOTA_UDQ_ACCT; + if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED) + flags |= FS_QUOTA_UDQ_ENFD; + if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) + flags |= FS_QUOTA_GDQ_ACCT; + if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED) + flags |= FS_QUOTA_GDQ_ENFD; + if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) + flags |= FS_QUOTA_PDQ_ACCT; + if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED) + flags |= FS_QUOTA_PDQ_ENFD; + return flags; +} + +static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) +{ + int type; + struct qc_state state; + int ret; + + ret = sb->s_qcop->get_state(sb, &state); + if (ret < 0) + return ret; + + memset(fqs, 0, sizeof(*fqs)); + fqs->qs_version = FS_QSTAT_VERSION; + fqs->qs_flags = quota_state_to_flags(&state); + /* No quota enabled? 
*/ + if (!fqs->qs_flags) + return -ENOSYS; + fqs->qs_incoredqs = state.s_incoredqs; + /* + * GETXSTATE quotactl has space for just one set of time limits so + * report them for the first enabled quota type + */ + for (type = 0; type < XQM_MAXQUOTAS; type++) + if (state.s_state[type].flags & QCI_ACCT_ENABLED) + break; + BUG_ON(type == XQM_MAXQUOTAS); + fqs->qs_btimelimit = state.s_state[type].spc_timelimit; + fqs->qs_itimelimit = state.s_state[type].ino_timelimit; + fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; + fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit; + fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit; + if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) { + fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino; + fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks; + fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents; + } + if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) { + fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino; + fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks; + fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents; + } + if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) { + /* + * Q_XGETQSTAT doesn't have room for both group and project + * quotas. So, allow the project quota values to be copied out + * only if there is no group quota information available. + */ + if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) { + fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino; + fqs->qs_gquota.qfs_nblks = + state.s_state[PRJQUOTA].blocks; + fqs->qs_gquota.qfs_nextents = + state.s_state[PRJQUOTA].nextents; + } + } + return 0; +} + static int quota_getxstate(struct super_block *sb, void __user *addr) { struct fs_quota_stat fqs; int ret; - if (!sb->s_qcop->get_xstate) + if (!sb->s_qcop->get_state) return -ENOSYS; - ret = sb->s_qcop->get_xstate(sb, &fqs); + ret = quota_getstate(sb, &fqs); if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) return -EFAULT; return ret; } +static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) +{ + int type; + struct qc_state state; + int ret; + + ret = sb->s_qcop->get_state(sb, &state); + if (ret < 0) + return ret; + + memset(fqs, 0, sizeof(*fqs)); + fqs->qs_version = FS_QSTAT_VERSION; + fqs->qs_flags = quota_state_to_flags(&state); + /* No quota enabled? 
*/ + if (!fqs->qs_flags) + return -ENOSYS; + fqs->qs_incoredqs = state.s_incoredqs; + /* + * GETXSTATV quotactl has space for just one set of time limits so + * report them for the first enabled quota type + */ + for (type = 0; type < XQM_MAXQUOTAS; type++) + if (state.s_state[type].flags & QCI_ACCT_ENABLED) + break; + BUG_ON(type == XQM_MAXQUOTAS); + fqs->qs_btimelimit = state.s_state[type].spc_timelimit; + fqs->qs_itimelimit = state.s_state[type].ino_timelimit; + fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; + fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit; + fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit; + if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) { + fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino; + fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks; + fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents; + } + if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) { + fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino; + fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks; + fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents; + } + if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) { + fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino; + fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks; + fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents; + } + return 0; +} + static int quota_getxstatev(struct super_block *sb, void __user *addr) { struct fs_quota_statv fqs; int ret; - if (!sb->s_qcop->get_xstatev) + if (!sb->s_qcop->get_state) return -ENOSYS; memset(&fqs, 0, sizeof(fqs)); @@ -284,7 +444,7 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr) default: return -EINVAL; } - ret = sb->s_qcop->get_xstatev(sb, &fqs); + ret = quota_getstatev(sb, &fqs); if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) return -EFAULT; return ret; @@ -357,6 +517,30 @@ static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) dst->d_fieldmask |= QC_RT_SPACE; } +static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst, + struct fs_disk_quota *src) +{ + memset(dst, 0, sizeof(*dst)); + dst->i_spc_timelimit = src->d_btimer; + dst->i_ino_timelimit = src->d_itimer; + dst->i_rt_spc_timelimit = src->d_rtbtimer; + dst->i_ino_warnlimit = src->d_iwarns; + dst->i_spc_warnlimit = src->d_bwarns; + dst->i_rt_spc_warnlimit = src->d_rtbwarns; + if (src->d_fieldmask & FS_DQ_BWARNS) + dst->i_fieldmask |= QC_SPC_WARNS; + if (src->d_fieldmask & FS_DQ_IWARNS) + dst->i_fieldmask |= QC_INO_WARNS; + if (src->d_fieldmask & FS_DQ_RTBWARNS) + dst->i_fieldmask |= QC_RT_SPC_WARNS; + if (src->d_fieldmask & FS_DQ_BTIMER) + dst->i_fieldmask |= QC_SPC_TIMER; + if (src->d_fieldmask & FS_DQ_ITIMER) + dst->i_fieldmask |= QC_INO_TIMER; + if (src->d_fieldmask & FS_DQ_RTBTIMER) + dst->i_fieldmask |= QC_RT_SPC_TIMER; +} + static int quota_setxquota(struct super_block *sb, int type, qid_t id, void __user *addr) { @@ -371,6 +555,21 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id, qid = make_kqid(current_user_ns(), type, id); if (!qid_valid(qid)) return -EINVAL; + /* Are we actually setting timer / warning limits for all users? 
*/ + if (from_kqid(&init_user_ns, qid) == 0 && + fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) { + struct qc_info qinfo; + int ret; + + if (!sb->s_qcop->set_info) + return -EINVAL; + copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq); + ret = sb->s_qcop->set_info(sb, type, &qinfo); + if (ret) + return ret; + /* These are already done */ + fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK); + } copy_from_xfs_dqblk(&qdq, &fdq); return sb->s_qcop->set_dqblk(sb, qid, &qdq); } diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index d65877f..58efb83 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -349,6 +349,13 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot) { int tmp = QT_TREEOFF; + +#ifdef __QUOTA_QT_PARANOIA + if (info->dqi_blocks <= QT_TREEOFF) { + quota_error(dquot->dq_sb, "Quota tree root isn't allocated!"); + return -EIO; + } +#endif return do_insert_tree(info, dquot, &tmp, 0); } diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index 9cb10d7..2aa012a 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -117,12 +117,16 @@ static int v2_read_file_info(struct super_block *sb, int type) qinfo = info->dqi_priv; if (version == 0) { /* limits are stored as unsigned 32-bit data */ - info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS; + info->dqi_max_spc_limit = 0xffffffffLL << QUOTABLOCK_BITS; info->dqi_max_ino_limit = 0xffffffff; } else { - /* used space is stored as unsigned 64-bit value in bytes */ - info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */ - info->dqi_max_ino_limit = 0xffffffffffffffffULL; + /* + * Used space is stored as unsigned 64-bit value in bytes but + * quota core supports only signed 64-bit values so use that + * as a limit + */ + info->dqi_max_spc_limit = 0x7fffffffffffffffLL; /* 2^63-1 */ + info->dqi_max_ino_limit = 0x7fffffffffffffffLL; } info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h index f1966b4..4e95430 100644 --- a/fs/quota/quotaio_v2.h +++ b/fs/quota/quotaio_v2.h @@ -13,12 +13,14 @@ */ #define V2_INITQMAGICS {\ 0xd9c01f11, /* USRQUOTA */\ - 0xd9c01927 /* GRPQUOTA */\ + 0xd9c01927, /* GRPQUOTA */\ + 0xd9c03f14, /* PRJQUOTA */\ } #define V2_INITQVERSIONS {\ 1, /* USRQUOTA */\ - 1 /* GRPQUOTA */\ + 1, /* GRPQUOTA */\ + 1, /* PRJQUOTA */\ } /* First generic header */ diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 4f56de8..183a212 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -31,9 +31,7 @@ #include "internal.h" const struct file_operations ramfs_file_operations = { - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .fsync = noop_fsync, diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index f6ab41b..ba1323a 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -44,9 +44,7 @@ const struct file_operations ramfs_file_operations = { .mmap_capabilities = ramfs_mmap_capabilities, .mmap = ramfs_nommu_mmap, .get_unmapped_area = ramfs_nommu_get_unmapped_area, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .fsync = noop_fsync, .splice_read = generic_file_splice_read, @@ -165,7 +163,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) */ static int ramfs_nommu_setattr(struct dentry *dentry, struct 
iattr *ia) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); unsigned int old_ia_valid = ia->ia_valid; int ret = 0; diff --git a/fs/read_write.c b/fs/read_write.c index 8e1b687..819ef3f 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -9,7 +9,6 @@ #include <linux/fcntl.h> #include <linux/file.h> #include <linux/uio.h> -#include <linux/aio.h> #include <linux/fsnotify.h> #include <linux/security.h> #include <linux/export.h> @@ -23,13 +22,10 @@ #include <asm/unistd.h> typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *); -typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *, - unsigned long, loff_t); typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *); const struct file_operations generic_ro_fops = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, .mmap = generic_file_readonly_mmap, .splice_read = generic_file_splice_read, @@ -343,13 +339,10 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos) init_sync_kiocb(&kiocb, file); kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = iov_iter_count(iter); iter->type |= READ; ret = file->f_op->read_iter(&kiocb, iter); - if (ret == -EIOCBQUEUED) - ret = wait_on_sync_kiocb(&kiocb); - + BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; return ret; @@ -366,13 +359,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) init_sync_kiocb(&kiocb, file); kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = iov_iter_count(iter); iter->type |= WRITE; ret = file->f_op->write_iter(&kiocb, iter); - if (ret == -EIOCBQUEUED) - ret = wait_on_sync_kiocb(&kiocb); - + BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; return ret; @@ -418,26 +408,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t return count > MAX_RW_COUNT ? 
MAX_RW_COUNT : count; } -ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) -{ - struct iovec iov = { .iov_base = buf, .iov_len = len }; - struct kiocb kiocb; - ssize_t ret; - - init_sync_kiocb(&kiocb, filp); - kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = len; - - ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&kiocb); - *ppos = kiocb.ki_pos; - return ret; -} - -EXPORT_SYMBOL(do_sync_read); - -ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) +static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) { struct iovec iov = { .iov_base = buf, .iov_len = len }; struct kiocb kiocb; @@ -446,34 +417,25 @@ ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *p init_sync_kiocb(&kiocb, filp); kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = len; iov_iter_init(&iter, READ, &iov, 1, len); ret = filp->f_op->read_iter(&kiocb, &iter); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&kiocb); + BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; } -EXPORT_SYMBOL(new_sync_read); - ssize_t __vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { - ssize_t ret; - if (file->f_op->read) - ret = file->f_op->read(file, buf, count, pos); - else if (file->f_op->aio_read) - ret = do_sync_read(file, buf, count, pos); + return file->f_op->read(file, buf, count, pos); else if (file->f_op->read_iter) - ret = new_sync_read(file, buf, count, pos); + return new_sync_read(file, buf, count, pos); else - ret = -EINVAL; - - return ret; + return -EINVAL; } +EXPORT_SYMBOL(__vfs_read); ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { @@ -502,26 +464,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) EXPORT_SYMBOL(vfs_read); -ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos) -{ - struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len }; - struct kiocb kiocb; - ssize_t ret; - - init_sync_kiocb(&kiocb, filp); - kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = len; - - ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&kiocb); - *ppos = kiocb.ki_pos; - return ret; -} - -EXPORT_SYMBOL(do_sync_write); - -ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos) +static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos) { struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len }; struct kiocb kiocb; @@ -530,17 +473,26 @@ ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, lo init_sync_kiocb(&kiocb, filp); kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = len; iov_iter_init(&iter, WRITE, &iov, 1, len); ret = filp->f_op->write_iter(&kiocb, &iter); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&kiocb); - *ppos = kiocb.ki_pos; + BUG_ON(ret == -EIOCBQUEUED); + if (ret > 0) + *ppos = kiocb.ki_pos; return ret; } -EXPORT_SYMBOL(new_sync_write); +ssize_t __vfs_write(struct file *file, const char __user *p, size_t count, + loff_t *pos) +{ + if (file->f_op->write) + return file->f_op->write(file, p, count, pos); + else if (file->f_op->write_iter) + return new_sync_write(file, p, count, pos); + else + return -EINVAL; +} +EXPORT_SYMBOL(__vfs_write); ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos) { @@ 
-556,12 +508,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t p = (__force const char __user *)buf; if (count > MAX_RW_COUNT) count = MAX_RW_COUNT; - if (file->f_op->write) - ret = file->f_op->write(file, p, count, pos); - else if (file->f_op->aio_write) - ret = do_sync_write(file, p, count, pos); - else - ret = new_sync_write(file, p, count, pos); + ret = __vfs_write(file, p, count, pos); set_fs(old_fs); if (ret > 0) { fsnotify_modify(file); @@ -588,12 +535,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_ if (ret >= 0) { count = ret; file_start_write(file); - if (file->f_op->write) - ret = file->f_op->write(file, buf, count, pos); - else if (file->f_op->aio_write) - ret = do_sync_write(file, buf, count, pos); - else - ret = new_sync_write(file, buf, count, pos); + ret = __vfs_write(file, buf, count, pos); if (ret > 0) { fsnotify_modify(file); add_wchar(current, ret); @@ -710,60 +652,32 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to) } EXPORT_SYMBOL(iov_shorten); -static ssize_t do_iter_readv_writev(struct file *filp, int rw, const struct iovec *iov, - unsigned long nr_segs, size_t len, loff_t *ppos, iter_fn_t fn) -{ - struct kiocb kiocb; - struct iov_iter iter; - ssize_t ret; - - init_sync_kiocb(&kiocb, filp); - kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = len; - - iov_iter_init(&iter, rw, iov, nr_segs, len); - ret = fn(&kiocb, &iter); - if (ret == -EIOCBQUEUED) - ret = wait_on_sync_kiocb(&kiocb); - *ppos = kiocb.ki_pos; - return ret; -} - -static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov, - unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn) +static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, + loff_t *ppos, iter_fn_t fn) { struct kiocb kiocb; ssize_t ret; init_sync_kiocb(&kiocb, filp); kiocb.ki_pos = *ppos; - kiocb.ki_nbytes = len; - ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos); - if (ret == -EIOCBQUEUED) - ret = wait_on_sync_kiocb(&kiocb); + ret = fn(&kiocb, iter); + BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; } /* Do it by hand, with file-ops */ -static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov, - unsigned long nr_segs, loff_t *ppos, io_fn_t fn) +static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, + loff_t *ppos, io_fn_t fn) { - struct iovec *vector = iov; ssize_t ret = 0; - while (nr_segs > 0) { - void __user *base; - size_t len; + while (iov_iter_count(iter)) { + struct iovec iovec = iov_iter_iovec(iter); ssize_t nr; - base = vector->iov_base; - len = vector->iov_len; - vector++; - nr_segs--; - - nr = fn(filp, base, len, ppos); + nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos); if (nr < 0) { if (!ret) @@ -771,8 +685,9 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov, break; } ret += nr; - if (nr != len) + if (nr != iovec.iov_len) break; + iov_iter_advance(iter, nr); } return ret; @@ -863,48 +778,42 @@ static ssize_t do_readv_writev(int type, struct file *file, size_t tot_len; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; + struct iov_iter iter; ssize_t ret; io_fn_t fn; - iov_fn_t fnv; iter_fn_t iter_fn; - ret = rw_copy_check_uvector(type, uvector, nr_segs, - ARRAY_SIZE(iovstack), iovstack, &iov); - if (ret <= 0) - goto out; + ret = import_iovec(type, uvector, nr_segs, + ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) + return ret; - tot_len = ret; + tot_len = iov_iter_count(&iter); + if 
(!tot_len) + goto out; ret = rw_verify_area(type, file, pos, tot_len); if (ret < 0) goto out; - fnv = NULL; if (type == READ) { fn = file->f_op->read; - fnv = file->f_op->aio_read; iter_fn = file->f_op->read_iter; } else { fn = (io_fn_t)file->f_op->write; - fnv = file->f_op->aio_write; iter_fn = file->f_op->write_iter; file_start_write(file); } if (iter_fn) - ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len, - pos, iter_fn); - else if (fnv) - ret = do_sync_readv_writev(file, iov, nr_segs, tot_len, - pos, fnv); + ret = do_iter_readv_writev(file, &iter, pos, iter_fn); else - ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn); + ret = do_loop_readv_writev(file, &iter, pos, fn); if (type != READ) file_end_write(file); out: - if (iov != iovstack) - kfree(iov); + kfree(iov); if ((ret + (type == READ)) > 0) { if (type == READ) fsnotify_access(file); @@ -1043,48 +952,42 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, compat_ssize_t tot_len; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; + struct iov_iter iter; ssize_t ret; io_fn_t fn; - iov_fn_t fnv; iter_fn_t iter_fn; - ret = compat_rw_copy_check_uvector(type, uvector, nr_segs, - UIO_FASTIOV, iovstack, &iov); - if (ret <= 0) - goto out; + ret = compat_import_iovec(type, uvector, nr_segs, + UIO_FASTIOV, &iov, &iter); + if (ret < 0) + return ret; - tot_len = ret; + tot_len = iov_iter_count(&iter); + if (!tot_len) + goto out; ret = rw_verify_area(type, file, pos, tot_len); if (ret < 0) goto out; - fnv = NULL; if (type == READ) { fn = file->f_op->read; - fnv = file->f_op->aio_read; iter_fn = file->f_op->read_iter; } else { fn = (io_fn_t)file->f_op->write; - fnv = file->f_op->aio_write; iter_fn = file->f_op->write_iter; file_start_write(file); } if (iter_fn) - ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len, - pos, iter_fn); - else if (fnv) - ret = do_sync_readv_writev(file, iov, nr_segs, tot_len, - pos, fnv); + ret = do_iter_readv_writev(file, &iter, pos, iter_fn); else - ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn); + ret = do_loop_readv_writev(file, &iter, pos, fn); if (type != READ) file_end_write(file); out: - if (iov != iovstack) - kfree(iov); + kfree(iov); if ((ret + (type == READ)) > 0) { if (type == READ) fsnotify_access(file); diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 0a7dc94..4a024e2 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -53,8 +53,8 @@ static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *deh) { struct dentry *privroot = REISERFS_SB(dir->i_sb)->priv_root; - return (privroot->d_inode && - deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); + return (d_really_is_positive(privroot) && + deh->deh_objectid == INODE_PKEY(d_inode(privroot))->k_objectid); } int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 751dd3f..96a1bcf 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -243,8 +243,6 @@ drop_write_lock: } const struct file_operations reiserfs_file_operations = { - .read = new_sync_read, - .write = new_sync_write, .unlocked_ioctl = reiserfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = reiserfs_compat_ioctl, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index e72401e..f6f2fba 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -18,7 +18,7 @@ #include <linux/writeback.h> #include <linux/quotaops.h> #include 
<linux/swap.h> -#include <linux/aio.h> +#include <linux/uio.h> int reiserfs_commit_write(struct file *f, struct page *page, unsigned from, unsigned to); @@ -3278,22 +3278,22 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags) * We thank Mingming Cao for helping us understand in great detail what * to do in this section of the code. */ -static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, loff_t offset) +static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, + ret = blockdev_direct_IO(iocb, inode, iter, offset, reiserfs_get_blocks_direct_io); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. */ - if (unlikely((rw & WRITE) && ret < 0)) { + if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = offset + count; @@ -3308,7 +3308,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb, int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); unsigned int ia_valid; int error; diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index cd11358..b55a074 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -400,7 +400,7 @@ struct dentry *reiserfs_get_parent(struct dentry *child) struct inode *inode = NULL; struct reiserfs_dir_entry de; INITIALIZE_PATH(path_to_entry); - struct inode *dir = child->d_inode; + struct inode *dir = d_inode(child); if (dir->i_nlink == 0) { return ERR_PTR(-ENOENT); @@ -917,7 +917,7 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry) goto end_rmdir; } - inode = dentry->d_inode; + inode = d_inode(dentry); reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); @@ -987,7 +987,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry) dquot_initialize(dir); - inode = dentry->d_inode; + inode = d_inode(dentry); /* * in this transaction we can be doing at max two balancings and @@ -1174,7 +1174,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { int retval; - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct reiserfs_transaction_handle th; /* * We need blocks for transaction + update of quotas for @@ -1311,8 +1311,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, dquot_initialize(old_dir); dquot_initialize(new_dir); - old_inode = old_dentry->d_inode; - new_dentry_inode = new_dentry->d_inode; + old_inode = d_inode(old_dentry); + new_dentry_inode = d_inode(new_dentry); /* * make sure that oldname still exists and points to an object we diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index bb79cdd..2adcde1 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -910,7 +910,6 @@ do { \ if (!(cond)) \ reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \ __FILE__ ":%i:%s: " format "\n", \ - in_interrupt() ? 
-1 : task_pid_nr(current), \ __LINE__, __func__ , ##args); \ } while (0) diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 71fbbe3..0111ad0 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -805,7 +805,7 @@ static const struct quotactl_ops reiserfs_qctl_operations = { .quota_on = reiserfs_quota_on, .quota_off = dquot_quota_off, .quota_sync = dquot_quota_sync, - .get_info = dquot_get_dqinfo, + .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk, @@ -1687,7 +1687,7 @@ static __u32 find_hash_out(struct super_block *s) __u32 hash = DEFAULT_HASH; __u32 deh_hashval, teahash, r5hash, yurahash; - inode = s->s_root->d_inode; + inode = d_inode(s->s_root); make_cpu_key(&key, inode, ~0, TYPE_DIRENTRY, 3); retval = search_by_entry_key(s, &key, &path, &de); @@ -2347,7 +2347,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, err = -EXDEV; goto out; } - inode = path->dentry->d_inode; + inode = d_inode(path->dentry); /* * We must not pack tails for quota files on reiserfs for quota * IO to work diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 4e781e6..e87f9b5 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -87,9 +87,9 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry) BUG_ON(!mutex_is_locked(&dir->i_mutex)); - mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); + mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD); error = dir->i_op->unlink(dir, dentry); - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); if (!error) d_delete(dentry); @@ -102,11 +102,11 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry) BUG_ON(!mutex_is_locked(&dir->i_mutex)); - mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); + mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD); error = dir->i_op->rmdir(dir, dentry); if (!error) - dentry->d_inode->i_flags |= S_DEAD; - mutex_unlock(&dentry->d_inode->i_mutex); + d_inode(dentry)->i_flags |= S_DEAD; + mutex_unlock(&d_inode(dentry)->i_mutex); if (!error) d_delete(dentry); @@ -120,26 +120,26 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags) struct dentry *privroot = REISERFS_SB(sb)->priv_root; struct dentry *xaroot; - if (!privroot->d_inode) + if (d_really_is_negative(privroot)) return ERR_PTR(-ENODATA); - mutex_lock_nested(&privroot->d_inode->i_mutex, I_MUTEX_XATTR); + mutex_lock_nested(&d_inode(privroot)->i_mutex, I_MUTEX_XATTR); xaroot = dget(REISERFS_SB(sb)->xattr_root); if (!xaroot) xaroot = ERR_PTR(-ENODATA); - else if (!xaroot->d_inode) { + else if (d_really_is_negative(xaroot)) { int err = -ENODATA; if (xattr_may_create(flags)) - err = xattr_mkdir(privroot->d_inode, xaroot, 0700); + err = xattr_mkdir(d_inode(privroot), xaroot, 0700); if (err) { dput(xaroot); xaroot = ERR_PTR(err); } } - mutex_unlock(&privroot->d_inode->i_mutex); + mutex_unlock(&d_inode(privroot)->i_mutex); return xaroot; } @@ -156,21 +156,21 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags) le32_to_cpu(INODE_PKEY(inode)->k_objectid), inode->i_generation); - mutex_lock_nested(&xaroot->d_inode->i_mutex, I_MUTEX_XATTR); + mutex_lock_nested(&d_inode(xaroot)->i_mutex, I_MUTEX_XATTR); xadir = lookup_one_len(namebuf, xaroot, strlen(namebuf)); - if (!IS_ERR(xadir) && !xadir->d_inode) { + if (!IS_ERR(xadir) && d_really_is_negative(xadir)) { int err = -ENODATA; if (xattr_may_create(flags)) - err = xattr_mkdir(xaroot->d_inode, xadir, 
0700); + err = xattr_mkdir(d_inode(xaroot), xadir, 0700); if (err) { dput(xadir); xadir = ERR_PTR(err); } } - mutex_unlock(&xaroot->d_inode->i_mutex); + mutex_unlock(&d_inode(xaroot)->i_mutex); dput(xaroot); return xadir; } @@ -195,7 +195,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen, container_of(ctx, struct reiserfs_dentry_buf, ctx); struct dentry *dentry; - WARN_ON_ONCE(!mutex_is_locked(&dbuf->xadir->d_inode->i_mutex)); + WARN_ON_ONCE(!mutex_is_locked(&d_inode(dbuf->xadir)->i_mutex)); if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) return -ENOSPC; @@ -207,7 +207,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen, dentry = lookup_one_len(name, dbuf->xadir, namelen); if (IS_ERR(dentry)) { return PTR_ERR(dentry); - } else if (!dentry->d_inode) { + } else if (d_really_is_negative(dentry)) { /* A directory entry exists, but no file? */ reiserfs_error(dentry->d_sb, "xattr-20003", "Corrupted directory: xattr %pd listed but " @@ -249,16 +249,16 @@ static int reiserfs_for_each_xattr(struct inode *inode, if (IS_ERR(dir)) { err = PTR_ERR(dir); goto out; - } else if (!dir->d_inode) { + } else if (d_really_is_negative(dir)) { err = 0; goto out_dir; } - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_XATTR); buf.xadir = dir; while (1) { - err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx); + err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx); if (err) break; if (!buf.count) @@ -276,7 +276,7 @@ static int reiserfs_for_each_xattr(struct inode *inode, break; buf.count = 0; } - mutex_unlock(&dir->d_inode->i_mutex); + mutex_unlock(&d_inode(dir)->i_mutex); cleanup_dentry_buf(&buf); @@ -298,13 +298,13 @@ static int reiserfs_for_each_xattr(struct inode *inode, if (!err) { int jerror; - mutex_lock_nested(&dir->d_parent->d_inode->i_mutex, + mutex_lock_nested(&d_inode(dir->d_parent)->i_mutex, I_MUTEX_XATTR); err = action(dir, data); reiserfs_write_lock(inode->i_sb); jerror = journal_end(&th); reiserfs_write_unlock(inode->i_sb); - mutex_unlock(&dir->d_parent->d_inode->i_mutex); + mutex_unlock(&d_inode(dir->d_parent)->i_mutex); err = jerror ?: err; } } @@ -319,7 +319,7 @@ out: static int delete_one_xattr(struct dentry *dentry, void *data) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); /* This is the xattr dir, handle specially. 
*/ if (d_is_dir(dentry)) @@ -384,27 +384,27 @@ static struct dentry *xattr_lookup(struct inode *inode, const char *name, if (IS_ERR(xadir)) return ERR_CAST(xadir); - mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR); + mutex_lock_nested(&d_inode(xadir)->i_mutex, I_MUTEX_XATTR); xafile = lookup_one_len(name, xadir, strlen(name)); if (IS_ERR(xafile)) { err = PTR_ERR(xafile); goto out; } - if (xafile->d_inode && (flags & XATTR_CREATE)) + if (d_really_is_positive(xafile) && (flags & XATTR_CREATE)) err = -EEXIST; - if (!xafile->d_inode) { + if (d_really_is_negative(xafile)) { err = -ENODATA; if (xattr_may_create(flags)) - err = xattr_create(xadir->d_inode, xafile, + err = xattr_create(d_inode(xadir), xafile, 0700|S_IFREG); } if (err) dput(xafile); out: - mutex_unlock(&xadir->d_inode->i_mutex); + mutex_unlock(&d_inode(xadir)->i_mutex); dput(xadir); if (err) return ERR_PTR(err); @@ -469,21 +469,21 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name) if (IS_ERR(xadir)) return PTR_ERR(xadir); - mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR); + mutex_lock_nested(&d_inode(xadir)->i_mutex, I_MUTEX_XATTR); dentry = lookup_one_len(name, xadir, strlen(name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_dput; } - if (dentry->d_inode) { - err = xattr_unlink(xadir->d_inode, dentry); + if (d_really_is_positive(dentry)) { + err = xattr_unlink(d_inode(xadir), dentry); update_ctime(inode); } dput(dentry); out_dput: - mutex_unlock(&xadir->d_inode->i_mutex); + mutex_unlock(&d_inode(xadir)->i_mutex); dput(xadir); return err; } @@ -533,7 +533,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, else chunk = buffer_size - buffer_pos; - page = reiserfs_get_page(dentry->d_inode, file_pos); + page = reiserfs_get_page(d_inode(dentry), file_pos); if (IS_ERR(page)) { err = PTR_ERR(page); goto out_unlock; @@ -573,18 +573,18 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, } new_size = buffer_size + sizeof(struct reiserfs_xattr_header); - if (!err && new_size < i_size_read(dentry->d_inode)) { + if (!err && new_size < i_size_read(d_inode(dentry))) { struct iattr newattrs = { .ia_ctime = current_fs_time(inode->i_sb), .ia_size = new_size, .ia_valid = ATTR_SIZE | ATTR_CTIME, }; - mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR); - inode_dio_wait(dentry->d_inode); + mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_XATTR); + inode_dio_wait(d_inode(dentry)); err = reiserfs_setattr(dentry, &newattrs); - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&d_inode(dentry)->i_mutex); } else update_ctime(inode); out_unlock: @@ -657,7 +657,7 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, down_read(&REISERFS_I(inode)->i_xattr_sem); - isize = i_size_read(dentry->d_inode); + isize = i_size_read(d_inode(dentry)); /* Just return the size needed */ if (buffer == NULL) { @@ -680,7 +680,7 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, else chunk = isize - file_pos; - page = reiserfs_get_page(dentry->d_inode, file_pos); + page = reiserfs_get_page(d_inode(dentry), file_pos); if (IS_ERR(page)) { err = PTR_ERR(page); goto out_unlock; @@ -775,7 +775,7 @@ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer, handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1) + if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) return -EOPNOTSUPP; return handler->get(dentry, 
name, buffer, size, handler->flags); @@ -784,7 +784,7 @@ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer, /* * Inode operation setxattr() * - * dentry->d_inode->i_mutex down + * d_inode(dentry)->i_mutex down */ int reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value, @@ -794,7 +794,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value, handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1) + if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) return -EOPNOTSUPP; return handler->set(dentry, name, value, size, flags, handler->flags); @@ -803,7 +803,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value, /* * Inode operation removexattr() * - * dentry->d_inode->i_mutex down + * d_inode(dentry)->i_mutex down */ int reiserfs_removexattr(struct dentry *dentry, const char *name) { @@ -811,7 +811,7 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name) handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1) + if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) return -EOPNOTSUPP; return handler->set(dentry, name, NULL, 0, XATTR_REPLACE, handler->flags); @@ -875,14 +875,14 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) .size = buffer ? size : 0, }; - if (!dentry->d_inode) + if (d_really_is_negative(dentry)) return -EINVAL; if (!dentry->d_sb->s_xattr || - get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1) + get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) return -EOPNOTSUPP; - dir = open_xa_dir(dentry->d_inode, XATTR_REPLACE); + dir = open_xa_dir(d_inode(dentry), XATTR_REPLACE); if (IS_ERR(dir)) { err = PTR_ERR(dir); if (err == -ENODATA) @@ -890,9 +890,9 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) goto out; } - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR); - err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx); - mutex_unlock(&dir->d_inode->i_mutex); + mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_XATTR); + err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx); + mutex_unlock(&d_inode(dir)->i_mutex); if (!err) err = buf.pos; @@ -905,12 +905,12 @@ out: static int create_privroot(struct dentry *dentry) { int err; - struct inode *inode = dentry->d_parent->d_inode; + struct inode *inode = d_inode(dentry->d_parent); WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); err = xattr_mkdir(inode, dentry, 0700); - if (err || !dentry->d_inode) { + if (err || d_really_is_negative(dentry)) { reiserfs_warning(dentry->d_sb, "jdm-20006", "xattrs/ACLs enabled and couldn't " "find/create .reiserfs_priv. 
" @@ -918,7 +918,7 @@ static int create_privroot(struct dentry *dentry) return -EOPNOTSUPP; } - dentry->d_inode->i_flags |= S_PRIVATE; + d_inode(dentry)->i_flags |= S_PRIVATE; reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr " "storage.\n", PRIVROOT_NAME); @@ -997,17 +997,17 @@ int reiserfs_lookup_privroot(struct super_block *s) int err = 0; /* If we don't have the privroot located yet - go find it */ - mutex_lock(&s->s_root->d_inode->i_mutex); + mutex_lock(&d_inode(s->s_root)->i_mutex); dentry = lookup_one_len(PRIVROOT_NAME, s->s_root, strlen(PRIVROOT_NAME)); if (!IS_ERR(dentry)) { REISERFS_SB(s)->priv_root = dentry; d_set_d_op(dentry, &xattr_lookup_poison_ops); - if (dentry->d_inode) - dentry->d_inode->i_flags |= S_PRIVATE; + if (d_really_is_positive(dentry)) + d_inode(dentry)->i_flags |= S_PRIVATE; } else err = PTR_ERR(dentry); - mutex_unlock(&s->s_root->d_inode->i_mutex); + mutex_unlock(&d_inode(s->s_root)->i_mutex); return err; } @@ -1026,15 +1026,15 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags) if (err) goto error; - if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) { - mutex_lock(&s->s_root->d_inode->i_mutex); + if (d_really_is_negative(privroot) && !(mount_flags & MS_RDONLY)) { + mutex_lock(&d_inode(s->s_root)->i_mutex); err = create_privroot(REISERFS_SB(s)->priv_root); - mutex_unlock(&s->s_root->d_inode->i_mutex); + mutex_unlock(&d_inode(s->s_root)->i_mutex); } - if (privroot->d_inode) { + if (d_really_is_positive(privroot)) { s->s_xattr = reiserfs_xattr_handlers; - mutex_lock(&privroot->d_inode->i_mutex); + mutex_lock(&d_inode(privroot)->i_mutex); if (!REISERFS_SB(s)->xattr_root) { struct dentry *dentry; @@ -1045,7 +1045,7 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags) else err = PTR_ERR(dentry); } - mutex_unlock(&privroot->d_inode->i_mutex); + mutex_unlock(&d_inode(privroot)->i_mutex); } error: diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h index f620e96..15dde62 100644 --- a/fs/reiserfs/xattr.h +++ b/fs/reiserfs/xattr.h @@ -78,7 +78,7 @@ static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode) if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) { nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); - if (!REISERFS_SB(inode->i_sb)->xattr_root->d_inode) + if (d_really_is_negative(REISERFS_SB(inode->i_sb)->xattr_root)) nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); } diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index e7f8939..9a3b061 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -15,10 +15,10 @@ security_get(struct dentry *dentry, const char *name, void *buffer, size_t size, if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX)) return -EINVAL; - if (IS_PRIVATE(dentry->d_inode)) + if (IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_get(dentry->d_inode, name, buffer, size); + return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); } static int @@ -28,10 +28,10 @@ security_set(struct dentry *dentry, const char *name, const void *buffer, if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX)) return -EINVAL; - if (IS_PRIVATE(dentry->d_inode)) + if (IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); } static size_t security_list(struct dentry *dentry, char *list, size_t list_len, @@ -39,7 +39,7 @@ static size_t security_list(struct dentry *dentry, char *list, size_t list_len, { const 
size_t len = namelen + 1; - if (IS_PRIVATE(dentry->d_inode)) + if (IS_PRIVATE(d_inode(dentry))) return 0; if (list && len <= list_len) { diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c index 5eeb0c4..e4f1343 100644 --- a/fs/reiserfs/xattr_trusted.c +++ b/fs/reiserfs/xattr_trusted.c @@ -14,10 +14,10 @@ trusted_get(struct dentry *dentry, const char *name, void *buffer, size_t size, if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX)) return -EINVAL; - if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode)) + if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_get(dentry->d_inode, name, buffer, size); + return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); } static int @@ -27,10 +27,10 @@ trusted_set(struct dentry *dentry, const char *name, const void *buffer, if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX)) return -EINVAL; - if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode)) + if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); } static size_t trusted_list(struct dentry *dentry, char *list, size_t list_size, @@ -38,7 +38,7 @@ static size_t trusted_list(struct dentry *dentry, char *list, size_t list_size, { const size_t len = name_len + 1; - if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode)) + if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) return 0; if (list && len <= list_size) { diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c index e50eab0..d0b08d3 100644 --- a/fs/reiserfs/xattr_user.c +++ b/fs/reiserfs/xattr_user.c @@ -15,7 +15,7 @@ user_get(struct dentry *dentry, const char *name, void *buffer, size_t size, return -EINVAL; if (!reiserfs_xattrs_user(dentry->d_sb)) return -EOPNOTSUPP; - return reiserfs_xattr_get(dentry->d_inode, name, buffer, size); + return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); } static int @@ -27,7 +27,7 @@ user_set(struct dentry *dentry, const char *name, const void *buffer, if (!reiserfs_xattrs_user(dentry->d_sb)) return -EOPNOTSUPP; - return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); } static size_t user_list(struct dentry *dentry, char *list, size_t list_size, diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c index 7da9e21..1118a0d 100644 --- a/fs/romfs/mmap-nommu.c +++ b/fs/romfs/mmap-nommu.c @@ -81,7 +81,6 @@ static unsigned romfs_mmap_capabilities(struct file *file) const struct file_operations romfs_ro_fops = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, .splice_read = generic_file_splice_read, .mmap = romfs_mmap, diff --git a/fs/splice.c b/fs/splice.c index 7968da9..476024b 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -32,7 +32,6 @@ #include <linux/gfp.h> #include <linux/socket.h> #include <linux/compat.h> -#include <linux/aio.h> #include "internal.h" /* @@ -524,6 +523,9 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, loff_t isize, left; int ret; + if (IS_DAX(in->f_mapping->host)) + return default_file_splice_read(in, ppos, pipe, len, flags); + isize = i_size_read(in->f_mapping->host); if (unlikely(*ppos >= isize)) return 0; @@ -1534,34 +1536,29 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov, struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = 
iovstack; struct iov_iter iter; - ssize_t count; pipe = get_pipe_info(file); if (!pipe) return -EBADF; - ret = rw_copy_check_uvector(READ, uiov, nr_segs, - ARRAY_SIZE(iovstack), iovstack, &iov); - if (ret <= 0) - goto out; - - count = ret; - iov_iter_init(&iter, READ, iov, nr_segs, count); + ret = import_iovec(READ, uiov, nr_segs, + ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) + return ret; + sd.total_len = iov_iter_count(&iter); sd.len = 0; - sd.total_len = count; sd.flags = flags; sd.u.data = &iter; sd.pos = 0; - pipe_lock(pipe); - ret = __splice_from_pipe(pipe, &sd, pipe_to_user); - pipe_unlock(pipe); - -out: - if (iov != iovstack) - kfree(iov); + if (sd.total_len) { + pipe_lock(pipe); + ret = __splice_from_pipe(pipe, &sd, pipe_to_user); + pipe_unlock(pipe); + } + kfree(iov); return ret; } diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c index 5e1101f..8073b65 100644 --- a/fs/squashfs/export.c +++ b/fs/squashfs/export.c @@ -110,7 +110,7 @@ static struct dentry *squashfs_fh_to_parent(struct super_block *sb, static struct dentry *squashfs_get_parent(struct dentry *child) { - struct inode *inode = child->d_inode; + struct inode *inode = d_inode(child); unsigned int parent_ino = squashfs_i(inode)->parent; return squashfs_export_iget(inode->i_sb, parent_ino); diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c index 92fcde7..e5e0ddf 100644 --- a/fs/squashfs/xattr.c +++ b/fs/squashfs/xattr.c @@ -39,7 +39,7 @@ static const struct xattr_handler *squashfs_xattr_handler(int); ssize_t squashfs_listxattr(struct dentry *d, char *buffer, size_t buffer_size) { - struct inode *inode = d->d_inode; + struct inode *inode = d_inode(d); struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) @@ -229,7 +229,7 @@ static int squashfs_user_get(struct dentry *d, const char *name, void *buffer, if (name[0] == '\0') return -EINVAL; - return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_USER, name, + return squashfs_xattr_get(d_inode(d), SQUASHFS_XATTR_USER, name, buffer, size); } @@ -259,7 +259,7 @@ static int squashfs_trusted_get(struct dentry *d, const char *name, if (name[0] == '\0') return -EINVAL; - return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_TRUSTED, name, + return squashfs_xattr_get(d_inode(d), SQUASHFS_XATTR_TRUSTED, name, buffer, size); } @@ -286,7 +286,7 @@ static int squashfs_security_get(struct dentry *d, const char *name, if (name[0] == '\0') return -EINVAL; - return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_SECURITY, name, + return squashfs_xattr_get(d_inode(d), SQUASHFS_XATTR_SECURITY, name, buffer, size); } @@ -51,7 +51,7 @@ EXPORT_SYMBOL(generic_fillattr); */ int vfs_getattr_nosec(struct path *path, struct kstat *stat) { - struct inode *inode = path->dentry->d_inode; + struct inode *inode = d_backing_inode(path->dentry); if (inode->i_op->getattr) return inode->i_op->getattr(path->mnt, path->dentry, stat); @@ -66,7 +66,7 @@ int vfs_getattr(struct path *path, struct kstat *stat) { int retval; - retval = security_inode_getattr(path->mnt, path->dentry); + retval = security_inode_getattr(path); if (retval) return retval; return vfs_getattr_nosec(path, stat); @@ -326,7 +326,7 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname, retry: error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty); if (!error) { - struct inode *inode = path.dentry->d_inode; + struct inode *inode = d_backing_inode(path.dentry); error = empty ? 
-ENOENT : -EINVAL; if (inode->i_op->readlink) { @@ -224,7 +224,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; s->s_time_gran = 1000000000; - s->cleancache_poolid = -1; + s->cleancache_poolid = CLEANCACHE_NO_POOL; s->s_shrink.seeks = DEFAULT_SEEKS; s->s_shrink.scan_objects = super_cache_scan; diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index 2554d88..b400c04 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -41,7 +41,7 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj, if (grp->attrs) { for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) { - umode_t mode = 0; + umode_t mode = (*attr)->mode; /* * In update mode, we're changing the permissions or @@ -55,9 +55,14 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj, if (!mode) continue; } + + WARN(mode & ~(SYSFS_PREALLOC | 0664), + "Attribute %s: Invalid permissions 0%o\n", + (*attr)->name, mode); + + mode &= SYSFS_PREALLOC | 0664; error = sysfs_add_file_mode_ns(parent, *attr, false, - (*attr)->mode | mode, - NULL); + mode, NULL); if (unlikely(error)) break; } diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index d42291d..8f3555f 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -132,7 +132,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_ { const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; - struct inode * dir = dentry->d_parent->d_inode; + struct inode * dir = d_inode(dentry->d_parent); unsigned long start, n; unsigned long npages = dir_pages(dir); struct page *page = NULL; @@ -176,7 +176,7 @@ found: int sysv_add_link(struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct page *page = NULL; diff --git a/fs/sysv/file.c b/fs/sysv/file.c index b00811c..82ddc09 100644 --- a/fs/sysv/file.c +++ b/fs/sysv/file.c @@ -21,9 +21,7 @@ */ const struct file_operations sysv_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .fsync = generic_file_fsync, @@ -32,7 +30,7 @@ const struct file_operations sysv_file_operations = { static int sysv_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; error = inode_change_ok(inode, attr); diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 66bc316..2fde40a 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -443,7 +443,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size) int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct super_block *s = dentry->d_sb; - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(d_inode(dentry), stat); stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size); stat->blksize = s->s_blocksize; return 0; diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 731b2bb..11e83ed 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -118,7 +118,7 @@ out_fail: static int sysv_link(struct dentry * old_dentry, struct inode * dir, struct dentry * dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); inode->i_ctime = CURRENT_TIME_SEC; 
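The sysfs create_files() hunk a little above starts from the attribute's own mode, warns when it carries bits outside SYSFS_PREALLOC | 0664, and masks the value before the file is created. A small userspace sketch of that validate-then-mask step; SYSFS_PREALLOC is kernel-internal, so a made-up PREALLOC_FLAG bit stands in for it here:

#include <stdio.h>
#include <sys/types.h>

#define PREALLOC_FLAG 010000	/* stand-in for the kernel's SYSFS_PREALLOC bit */
#define ALLOWED_BITS (PREALLOC_FLAG | 0664)

/* Warn about, then strip, permission bits a sysfs-style attribute may not carry. */
static mode_t sanitize_mode(const char *name, mode_t mode)
{
	if (mode & ~ALLOWED_BITS)
		fprintf(stderr, "Attribute %s: invalid permissions 0%o\n",
			name, (unsigned)mode);
	return mode & ALLOWED_BITS;
}

int main(void)
{
	/* 0777 carries execute bits and other-write: both get masked to 0664. */
	printf("result: 0%o\n", (unsigned)sanitize_mode("demo_attr", 0777));
	return 0;
}
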
inode_inc_link_count(inode); @@ -166,7 +166,7 @@ out_dir: static int sysv_unlink(struct inode * dir, struct dentry * dentry) { - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); struct page * page; struct sysv_dir_entry * de; int err = -ENOENT; @@ -187,7 +187,7 @@ out: static int sysv_rmdir(struct inode * dir, struct dentry * dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int err = -ENOTEMPTY; if (sysv_empty_dir(inode)) { @@ -208,8 +208,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry) static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, struct inode * new_dir, struct dentry * new_dentry) { - struct inode * old_inode = old_dentry->d_inode; - struct inode * new_inode = new_dentry->d_inode; + struct inode * old_inode = d_inode(old_dentry); + struct inode * new_inode = d_inode(new_dentry); struct page * dir_page = NULL; struct sysv_dir_entry * dir_de = NULL; struct page * old_page; diff --git a/fs/sysv/symlink.c b/fs/sysv/symlink.c index 00d2f8a..d3fa0d7 100644 --- a/fs/sysv/symlink.c +++ b/fs/sysv/symlink.c @@ -10,7 +10,7 @@ static void *sysv_follow_link(struct dentry *dentry, struct nameidata *nd) { - nd_set_link(nd, (char *)SYSV_I(dentry->d_inode)->i_data); + nd_set_link(nd, (char *)SYSV_I(d_inode(dentry))->i_data); return NULL; } diff --git a/fs/tracefs/Makefile b/fs/tracefs/Makefile new file mode 100644 index 0000000..82fa35b --- /dev/null +++ b/fs/tracefs/Makefile @@ -0,0 +1,4 @@ +tracefs-objs := inode.o + +obj-$(CONFIG_TRACING) += tracefs.o + diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c new file mode 100644 index 0000000..d92bdf3 --- /dev/null +++ b/fs/tracefs/inode.c @@ -0,0 +1,650 @@ +/* + * inode.c - part of tracefs, a pseudo file system for activating tracing + * + * Based on debugfs by: Greg Kroah-Hartman <greg@kroah.com> + * + * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * tracefs is the file system that is used by the tracing infrastructure. 
+ * + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/mount.h> +#include <linux/kobject.h> +#include <linux/namei.h> +#include <linux/tracefs.h> +#include <linux/fsnotify.h> +#include <linux/seq_file.h> +#include <linux/parser.h> +#include <linux/magic.h> +#include <linux/slab.h> + +#define TRACEFS_DEFAULT_MODE 0700 + +static struct vfsmount *tracefs_mount; +static int tracefs_mount_count; +static bool tracefs_registered; + +static ssize_t default_read_file(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return 0; +} + +static ssize_t default_write_file(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return count; +} + +static const struct file_operations tracefs_file_operations = { + .read = default_read_file, + .write = default_write_file, + .open = simple_open, + .llseek = noop_llseek, +}; + +static struct tracefs_dir_ops { + int (*mkdir)(const char *name); + int (*rmdir)(const char *name); +} tracefs_ops; + +static char *get_dname(struct dentry *dentry) +{ + const char *dname; + char *name; + int len = dentry->d_name.len; + + dname = dentry->d_name.name; + name = kmalloc(len + 1, GFP_KERNEL); + if (!name) + return NULL; + memcpy(name, dname, len); + name[len] = 0; + return name; +} + +static int tracefs_syscall_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode) +{ + char *name; + int ret; + + name = get_dname(dentry); + if (!name) + return -ENOMEM; + + /* + * The mkdir call can call the generic functions that create + * the files within the tracefs system. It is up to the individual + * mkdir routine to handle races. + */ + mutex_unlock(&inode->i_mutex); + ret = tracefs_ops.mkdir(name); + mutex_lock(&inode->i_mutex); + + kfree(name); + + return ret; +} + +static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry) +{ + char *name; + int ret; + + name = get_dname(dentry); + if (!name) + return -ENOMEM; + + /* + * The rmdir call can call the generic functions that create + * the files within the tracefs system. It is up to the individual + * rmdir routine to handle races. + * This time we need to unlock not only the parent (inode) but + * also the directory that is being deleted. 
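tracefs_syscall_mkdir() above (and rmdir just after it) drops the VFS directory locks around the supplied callback, because the tracing code invoked by that callback may itself create or remove files under the instances directory and would otherwise deadlock; the locks are re-taken before returning to the VFS. A hedged pthread sketch of that unlock-call-relock shape; the lock, callback, and function names are illustrative, not the kernel's:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

/* Callback that needs dir_lock itself, like tracefs' mkdir hook does with i_mutex. */
static int make_instance(const char *name)
{
	pthread_mutex_lock(&dir_lock);
	printf("creating instance '%s' under its own locking\n", name);
	pthread_mutex_unlock(&dir_lock);
	return 0;
}

/* The caller holds dir_lock (as the VFS holds i_mutex): drop it around the
 * callback to avoid self-deadlock, then re-acquire before returning. */
static int locked_mkdir(const char *name)
{
	int ret;

	pthread_mutex_lock(&dir_lock);	/* what the VFS would have taken for us */

	pthread_mutex_unlock(&dir_lock);
	ret = make_instance(name);
	pthread_mutex_lock(&dir_lock);

	pthread_mutex_unlock(&dir_lock);
	return ret;
}

int main(void)
{
	return locked_mkdir("demo");
}
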
+ */ + mutex_unlock(&inode->i_mutex); + mutex_unlock(&dentry->d_inode->i_mutex); + + ret = tracefs_ops.rmdir(name); + + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); + mutex_lock(&dentry->d_inode->i_mutex); + + kfree(name); + + return ret; +} + +static const struct inode_operations tracefs_dir_inode_operations = { + .lookup = simple_lookup, + .mkdir = tracefs_syscall_mkdir, + .rmdir = tracefs_syscall_rmdir, +}; + +static struct inode *tracefs_get_inode(struct super_block *sb) +{ + struct inode *inode = new_inode(sb); + if (inode) { + inode->i_ino = get_next_ino(); + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + } + return inode; +} + +struct tracefs_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +enum { + Opt_uid, + Opt_gid, + Opt_mode, + Opt_err +}; + +static const match_table_t tokens = { + {Opt_uid, "uid=%u"}, + {Opt_gid, "gid=%u"}, + {Opt_mode, "mode=%o"}, + {Opt_err, NULL} +}; + +struct tracefs_fs_info { + struct tracefs_mount_opts mount_opts; +}; + +static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) +{ + substring_t args[MAX_OPT_ARGS]; + int option; + int token; + kuid_t uid; + kgid_t gid; + char *p; + + opts->mode = TRACEFS_DEFAULT_MODE; + + while ((p = strsep(&data, ",")) != NULL) { + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_uid: + if (match_int(&args[0], &option)) + return -EINVAL; + uid = make_kuid(current_user_ns(), option); + if (!uid_valid(uid)) + return -EINVAL; + opts->uid = uid; + break; + case Opt_gid: + if (match_int(&args[0], &option)) + return -EINVAL; + gid = make_kgid(current_user_ns(), option); + if (!gid_valid(gid)) + return -EINVAL; + opts->gid = gid; + break; + case Opt_mode: + if (match_octal(&args[0], &option)) + return -EINVAL; + opts->mode = option & S_IALLUGO; + break; + /* + * We might like to report bad mount options here; + * but traditionally tracefs has ignored all mount options + */ + } + } + + return 0; +} + +static int tracefs_apply_options(struct super_block *sb) +{ + struct tracefs_fs_info *fsi = sb->s_fs_info; + struct inode *inode = sb->s_root->d_inode; + struct tracefs_mount_opts *opts = &fsi->mount_opts; + + inode->i_mode &= ~S_IALLUGO; + inode->i_mode |= opts->mode; + + inode->i_uid = opts->uid; + inode->i_gid = opts->gid; + + return 0; +} + +static int tracefs_remount(struct super_block *sb, int *flags, char *data) +{ + int err; + struct tracefs_fs_info *fsi = sb->s_fs_info; + + sync_filesystem(sb); + err = tracefs_parse_options(data, &fsi->mount_opts); + if (err) + goto fail; + + tracefs_apply_options(sb); + +fail: + return err; +} + +static int tracefs_show_options(struct seq_file *m, struct dentry *root) +{ + struct tracefs_fs_info *fsi = root->d_sb->s_fs_info; + struct tracefs_mount_opts *opts = &fsi->mount_opts; + + if (!uid_eq(opts->uid, GLOBAL_ROOT_UID)) + seq_printf(m, ",uid=%u", + from_kuid_munged(&init_user_ns, opts->uid)); + if (!gid_eq(opts->gid, GLOBAL_ROOT_GID)) + seq_printf(m, ",gid=%u", + from_kgid_munged(&init_user_ns, opts->gid)); + if (opts->mode != TRACEFS_DEFAULT_MODE) + seq_printf(m, ",mode=%o", opts->mode); + + return 0; +} + +static const struct super_operations tracefs_super_operations = { + .statfs = simple_statfs, + .remount_fs = tracefs_remount, + .show_options = tracefs_show_options, +}; + +static int trace_fill_super(struct super_block *sb, void *data, int silent) +{ + static struct tree_descr trace_files[] = {{""}}; + struct tracefs_fs_info *fsi; + int err; + + save_mount_options(sb, data); + 
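tracefs_parse_options() above walks the comma-separated mount data with match_token(), accepts uid=, gid= and mode=, and silently ignores anything else, as debugfs-style filesystems traditionally do. A rough userspace equivalent using strsep() and sscanf(); the option names and the 0700 default match the patch, but the parsing helpers here are plain libc, not the kernel's match_token():

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct mount_opts {
	uid_t uid;
	gid_t gid;
	mode_t mode;
};

/* Parse "uid=N,gid=N,mode=OCTAL"; unknown options are ignored, as in tracefs. */
static int parse_options(char *data, struct mount_opts *opts)
{
	char *p;

	opts->mode = 0700;	/* TRACEFS_DEFAULT_MODE in the patch */

	while ((p = strsep(&data, ",")) != NULL) {
		unsigned int v;

		if (!*p)
			continue;
		if (sscanf(p, "uid=%u", &v) == 1)
			opts->uid = v;
		else if (sscanf(p, "gid=%u", &v) == 1)
			opts->gid = v;
		else if (sscanf(p, "mode=%o", &v) == 1)
			opts->mode = v & 07777;	/* roughly S_IALLUGO */
		/* anything else: ignored, matching the patch's behaviour */
	}
	return 0;
}

int main(void)
{
	char data[] = "uid=1000,gid=1000,mode=0750,bogus";
	struct mount_opts o = { 0 };

	parse_options(data, &o);
	printf("uid=%u gid=%u mode=0%o\n",
	       (unsigned)o.uid, (unsigned)o.gid, (unsigned)o.mode);
	return 0;
}
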
+ fsi = kzalloc(sizeof(struct tracefs_fs_info), GFP_KERNEL); + sb->s_fs_info = fsi; + if (!fsi) { + err = -ENOMEM; + goto fail; + } + + err = tracefs_parse_options(data, &fsi->mount_opts); + if (err) + goto fail; + + err = simple_fill_super(sb, TRACEFS_MAGIC, trace_files); + if (err) + goto fail; + + sb->s_op = &tracefs_super_operations; + + tracefs_apply_options(sb); + + return 0; + +fail: + kfree(fsi); + sb->s_fs_info = NULL; + return err; +} + +static struct dentry *trace_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data) +{ + return mount_single(fs_type, flags, data, trace_fill_super); +} + +static struct file_system_type trace_fs_type = { + .owner = THIS_MODULE, + .name = "tracefs", + .mount = trace_mount, + .kill_sb = kill_litter_super, +}; +MODULE_ALIAS_FS("tracefs"); + +static struct dentry *start_creating(const char *name, struct dentry *parent) +{ + struct dentry *dentry; + int error; + + pr_debug("tracefs: creating file '%s'\n",name); + + error = simple_pin_fs(&trace_fs_type, &tracefs_mount, + &tracefs_mount_count); + if (error) + return ERR_PTR(error); + + /* If the parent is not specified, we create it in the root. + * We need the root dentry to do this, which is in the super + * block. A pointer to that is in the struct vfsmount that we + * have around. + */ + if (!parent) + parent = tracefs_mount->mnt_root; + + mutex_lock(&parent->d_inode->i_mutex); + dentry = lookup_one_len(name, parent, strlen(name)); + if (!IS_ERR(dentry) && dentry->d_inode) { + dput(dentry); + dentry = ERR_PTR(-EEXIST); + } + if (IS_ERR(dentry)) + mutex_unlock(&parent->d_inode->i_mutex); + return dentry; +} + +static struct dentry *failed_creating(struct dentry *dentry) +{ + mutex_unlock(&dentry->d_parent->d_inode->i_mutex); + dput(dentry); + simple_release_fs(&tracefs_mount, &tracefs_mount_count); + return NULL; +} + +static struct dentry *end_creating(struct dentry *dentry) +{ + mutex_unlock(&dentry->d_parent->d_inode->i_mutex); + return dentry; +} + +/** + * tracefs_create_file - create a file in the tracefs filesystem + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is NULL, then the + * file will be created in the root of the tracefs filesystem. + * @data: a pointer to something that the caller will want to get to later + * on. The inode.i_private pointer will point to this value on + * the open() call. + * @fops: a pointer to a struct file_operations that should be used for + * this file. + * + * This is the basic "create a file" function for tracefs. It allows for a + * wide range of flexibility in creating a file, or a directory (if you want + * to create a directory, the tracefs_create_dir() function is + * recommended to be used instead.) + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the tracefs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, %NULL will be returned. + * + * If tracefs is not enabled in the kernel, the value -%ENODEV will be + * returned. 
+ */ +struct dentry *tracefs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops) +{ + struct dentry *dentry; + struct inode *inode; + + if (!(mode & S_IFMT)) + mode |= S_IFREG; + BUG_ON(!S_ISREG(mode)); + dentry = start_creating(name, parent); + + if (IS_ERR(dentry)) + return NULL; + + inode = tracefs_get_inode(dentry->d_sb); + if (unlikely(!inode)) + return failed_creating(dentry); + + inode->i_mode = mode; + inode->i_fop = fops ? fops : &tracefs_file_operations; + inode->i_private = data; + d_instantiate(dentry, inode); + fsnotify_create(dentry->d_parent->d_inode, dentry); + return end_creating(dentry); +} + +static struct dentry *__create_dir(const char *name, struct dentry *parent, + const struct inode_operations *ops) +{ + struct dentry *dentry = start_creating(name, parent); + struct inode *inode; + + if (IS_ERR(dentry)) + return NULL; + + inode = tracefs_get_inode(dentry->d_sb); + if (unlikely(!inode)) + return failed_creating(dentry); + + inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; + inode->i_op = ops; + inode->i_fop = &simple_dir_operations; + + /* directory inodes start off with i_nlink == 2 (for "." entry) */ + inc_nlink(inode); + d_instantiate(dentry, inode); + inc_nlink(dentry->d_parent->d_inode); + fsnotify_mkdir(dentry->d_parent->d_inode, dentry); + return end_creating(dentry); +} + +/** + * tracefs_create_dir - create a directory in the tracefs filesystem + * @name: a pointer to a string containing the name of the directory to + * create. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is NULL, then the + * directory will be created in the root of the tracefs filesystem. + * + * This function creates a directory in tracefs with the given name. + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the tracefs_remove() function when the file is + * to be removed. If an error occurs, %NULL will be returned. + * + * If tracing is not enabled in the kernel, the value -%ENODEV will be + * returned. + */ +struct dentry *tracefs_create_dir(const char *name, struct dentry *parent) +{ + return __create_dir(name, parent, &simple_dir_inode_operations); +} + +/** + * tracefs_create_instance_dir - create the tracing instances directory + * @name: The name of the instances directory to create + * @parent: The parent directory that the instances directory will exist + * @mkdir: The function to call when a mkdir is performed. + * @rmdir: The function to call when a rmdir is performed. + * + * Only one instances directory is allowed. + * + * The instances directory is special as it allows for mkdir and rmdir to + * to be done by userspace. When a mkdir or rmdir is performed, the inode + * locks are released and the methhods passed in (@mkdir and @rmdir) are + * called without locks and with the name of the directory being created + * within the instances directory. + * + * Returns the dentry of the instances directory. + */ +struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent, + int (*mkdir)(const char *name), + int (*rmdir)(const char *name)) +{ + struct dentry *dentry; + + /* Only allow one instance of the instances directory. 
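The creation helpers above follow the debugfs calling convention, so a tracing module would presumably use them along the lines of the sketch below. This is an assumed usage example built only from the prototypes shown in this patch; the demo module, its file_operations, and the "message" file are hypothetical, and the read helper uses the standard libfs routines rather than anything tracefs-specific:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/tracefs.h>

static struct dentry *demo_dir;
static struct dentry *demo_file;

/* Trivial read-only file backed by a static string. */
static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	static const char msg[] = "hello from tracefs\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations demo_fops = {
	.read	= demo_read,
	.open	= simple_open,
	.llseek	= default_llseek,
};

static int __init demo_init(void)
{
	demo_dir = tracefs_create_dir("demo", NULL);	/* NULL parent: tracefs root */
	if (!demo_dir)
		return -ENOMEM;

	demo_file = tracefs_create_file("message", 0444, demo_dir,
					NULL, &demo_fops);
	if (!demo_file) {
		tracefs_remove(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	/* No automatic cleanup on unload: remove what we created. */
	tracefs_remove(demo_file);
	tracefs_remove(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
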
*/ + if (WARN_ON(tracefs_ops.mkdir || tracefs_ops.rmdir)) + return NULL; + + dentry = __create_dir(name, parent, &tracefs_dir_inode_operations); + if (!dentry) + return NULL; + + tracefs_ops.mkdir = mkdir; + tracefs_ops.rmdir = rmdir; + + return dentry; +} + +static inline int tracefs_positive(struct dentry *dentry) +{ + return dentry->d_inode && !d_unhashed(dentry); +} + +static int __tracefs_remove(struct dentry *dentry, struct dentry *parent) +{ + int ret = 0; + + if (tracefs_positive(dentry)) { + if (dentry->d_inode) { + dget(dentry); + switch (dentry->d_inode->i_mode & S_IFMT) { + case S_IFDIR: + ret = simple_rmdir(parent->d_inode, dentry); + break; + default: + simple_unlink(parent->d_inode, dentry); + break; + } + if (!ret) + d_delete(dentry); + dput(dentry); + } + } + return ret; +} + +/** + * tracefs_remove - removes a file or directory from the tracefs filesystem + * @dentry: a pointer to a the dentry of the file or directory to be + * removed. + * + * This function removes a file or directory in tracefs that was previously + * created with a call to another tracefs function (like + * tracefs_create_file() or variants thereof.) + */ +void tracefs_remove(struct dentry *dentry) +{ + struct dentry *parent; + int ret; + + if (IS_ERR_OR_NULL(dentry)) + return; + + parent = dentry->d_parent; + if (!parent || !parent->d_inode) + return; + + mutex_lock(&parent->d_inode->i_mutex); + ret = __tracefs_remove(dentry, parent); + mutex_unlock(&parent->d_inode->i_mutex); + if (!ret) + simple_release_fs(&tracefs_mount, &tracefs_mount_count); +} + +/** + * tracefs_remove_recursive - recursively removes a directory + * @dentry: a pointer to a the dentry of the directory to be removed. + * + * This function recursively removes a directory tree in tracefs that + * was previously created with a call to another tracefs function + * (like tracefs_create_file() or variants thereof.) + */ +void tracefs_remove_recursive(struct dentry *dentry) +{ + struct dentry *child, *parent; + + if (IS_ERR_OR_NULL(dentry)) + return; + + parent = dentry->d_parent; + if (!parent || !parent->d_inode) + return; + + parent = dentry; + down: + mutex_lock(&parent->d_inode->i_mutex); + loop: + /* + * The parent->d_subdirs is protected by the d_lock. Outside that + * lock, the child can be unlinked and set to be freed which can + * use the d_u.d_child as the rcu head and corrupt this list. + */ + spin_lock(&parent->d_lock); + list_for_each_entry(child, &parent->d_subdirs, d_child) { + if (!tracefs_positive(child)) + continue; + + /* perhaps simple_empty(child) makes more sense */ + if (!list_empty(&child->d_subdirs)) { + spin_unlock(&parent->d_lock); + mutex_unlock(&parent->d_inode->i_mutex); + parent = child; + goto down; + } + + spin_unlock(&parent->d_lock); + + if (!__tracefs_remove(child, parent)) + simple_release_fs(&tracefs_mount, &tracefs_mount_count); + + /* + * The parent->d_lock protects agaist child from unlinking + * from d_subdirs. When releasing the parent->d_lock we can + * no longer trust that the next pointer is valid. + * Restart the loop. We'll skip this one with the + * tracefs_positive() check. 
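tracefs_remove_recursive() above must delete children before their parent while juggling d_lock and i_mutex, restarting its scan whenever a lock is dropped. In userspace the same children-first requirement is what nftw() with FTW_DEPTH provides; a hedged sketch of the analogous recursive delete, with none of the kernel locking involved:

#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <stdio.h>

/* Called for every entry, children before parents thanks to FTW_DEPTH,
 * which mirrors the "remove leaves first" order tracefs needs. */
static int rm_entry(const char *path, const struct stat *sb,
		    int typeflag, struct FTW *ftwbuf)
{
	(void)sb; (void)typeflag; (void)ftwbuf;
	if (remove(path) != 0) {
		perror(path);
		return -1;	/* stop the walk on the first failure */
	}
	return 0;
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}
	/* FTW_DEPTH: post-order traversal; FTW_PHYS: do not follow symlinks. */
	return nftw(argv[1], rm_entry, 16, FTW_DEPTH | FTW_PHYS) ? 1 : 0;
}
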
+ */ + goto loop; + } + spin_unlock(&parent->d_lock); + + mutex_unlock(&parent->d_inode->i_mutex); + child = parent; + parent = parent->d_parent; + mutex_lock(&parent->d_inode->i_mutex); + + if (child != dentry) + /* go up */ + goto loop; + + if (!__tracefs_remove(child, parent)) + simple_release_fs(&tracefs_mount, &tracefs_mount_count); + mutex_unlock(&parent->d_inode->i_mutex); +} + +/** + * tracefs_initialized - Tells whether tracefs has been registered + */ +bool tracefs_initialized(void) +{ + return tracefs_registered; +} + +static struct kobject *trace_kobj; + +static int __init tracefs_init(void) +{ + int retval; + + trace_kobj = kobject_create_and_add("tracing", kernel_kobj); + if (!trace_kobj) + return -EINVAL; + + retval = register_filesystem(&trace_fs_type); + if (!retval) + tracefs_registered = true; + + return retval; +} +core_initcall(tracefs_init); diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index eb997e9..11a11b3 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -509,7 +509,7 @@ again: c->bi.nospace_rp = 1; smp_wmb(); } else - ubifs_err("cannot budget space, error %d", err); + ubifs_err(c, "cannot budget space, error %d", err); return err; } diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index 26b69b2..63f5661 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -225,7 +225,7 @@ out_cancel: out_up: up_write(&c->commit_sem); out: - ubifs_err("commit failed, error %d", err); + ubifs_err(c, "commit failed, error %d", err); spin_lock(&c->cs_lock); c->cmt_state = COMMIT_BROKEN; wake_up(&c->cmt_wq); @@ -289,7 +289,7 @@ int ubifs_bg_thread(void *info) int err; struct ubifs_info *c = info; - ubifs_msg("background thread \"%s\" started, PID %d", + ubifs_msg(c, "background thread \"%s\" started, PID %d", c->bgt_name, current->pid); set_freezable(); @@ -324,7 +324,7 @@ int ubifs_bg_thread(void *info) cond_resched(); } - ubifs_msg("background thread \"%s\" stops", c->bgt_name); + ubifs_msg(c, "background thread \"%s\" stops", c->bgt_name); return 0; } @@ -712,13 +712,13 @@ out: return 0; out_dump: - ubifs_err("dumping index node (iip=%d)", i->iip); + ubifs_err(c, "dumping index node (iip=%d)", i->iip); ubifs_dump_node(c, idx); list_del(&i->list); kfree(i); if (!list_empty(&list)) { i = list_entry(list.prev, struct idx_node, list); - ubifs_err("dumping parent index node"); + ubifs_err(c, "dumping parent index node"); ubifs_dump_node(c, &i->idx); } out_free: @@ -727,7 +727,7 @@ out_free: list_del(&i->list); kfree(i); } - ubifs_err("failed, error %d", err); + ubifs_err(c, "failed, error %d", err); if (err > 0) err = -EINVAL; return err; diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 2bfa095..565cb56 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -92,8 +92,8 @@ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; * Note, if the input buffer was not compressed, it is copied to the output * buffer and %UBIFS_COMPR_NONE is returned in @compr_type. 
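Per the comment above, ubifs_compress() falls back to storing the data uncompressed and reporting UBIFS_COMPR_NONE whenever compression fails or does not help. A hedged userspace illustration of that try-then-fall-back shape, using zlib's compress2() in place of the kernel crypto API (link with -lz); the COMPR_* names here are stand-ins, not UBIFS's:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

enum { COMPR_NONE = 0, COMPR_ZLIB = 1 };

/*
 * Try to compress in_len bytes into out_buf (which the caller sizes to hold
 * at least in_len bytes).  On error, or when the result would not actually
 * be smaller, copy the input verbatim and report COMPR_NONE -- the same
 * fallback the UBIFS comment describes.
 */
static void compress_or_store(const void *in_buf, unsigned long in_len,
			      void *out_buf, unsigned long *out_len,
			      int *compr_type)
{
	uLongf dst_len = *out_len;

	if (compress2(out_buf, &dst_len, in_buf, in_len, 6) == Z_OK &&
	    dst_len < in_len) {
		*out_len = dst_len;
		*compr_type = COMPR_ZLIB;
		return;
	}
	memcpy(out_buf, in_buf, in_len);
	*out_len = in_len;
	*compr_type = COMPR_NONE;
}

int main(void)
{
	char in[256], out[512];
	unsigned long out_len = sizeof(out);
	int type;

	memset(in, 'A', sizeof(in));	/* highly compressible input */
	compress_or_store(in, sizeof(in), out, &out_len, &type);
	printf("stored %lu bytes, compr_type=%d\n", out_len, type);
	return 0;
}
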
*/ -void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, - int *compr_type) +void ubifs_compress(const struct ubifs_info *c, const void *in_buf, + int in_len, void *out_buf, int *out_len, int *compr_type) { int err; struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; @@ -112,9 +112,9 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, if (compr->comp_mutex) mutex_unlock(compr->comp_mutex); if (unlikely(err)) { - ubifs_warn("cannot compress %d bytes, compressor %s, error %d, leave data uncompressed", + ubifs_warn(c, "cannot compress %d bytes, compressor %s, error %d, leave data uncompressed", in_len, compr->name, err); - goto no_compr; + goto no_compr; } /* @@ -144,21 +144,21 @@ no_compr: * The length of the uncompressed data is returned in @out_len. This functions * returns %0 on success or a negative error code on failure. */ -int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, - int *out_len, int compr_type) +int ubifs_decompress(const struct ubifs_info *c, const void *in_buf, + int in_len, void *out_buf, int *out_len, int compr_type) { int err; struct ubifs_compressor *compr; if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { - ubifs_err("invalid compression type %d", compr_type); + ubifs_err(c, "invalid compression type %d", compr_type); return -EINVAL; } compr = ubifs_compressors[compr_type]; if (unlikely(!compr->capi_name)) { - ubifs_err("%s compression is not compiled in", compr->name); + ubifs_err(c, "%s compression is not compiled in", compr->name); return -EINVAL; } @@ -175,7 +175,7 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, if (compr->decomp_mutex) mutex_unlock(compr->decomp_mutex); if (err) - ubifs_err("cannot decompress %d bytes, compressor %s, error %d", + ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d", in_len, compr->name, err); return err; @@ -193,8 +193,8 @@ static int __init compr_init(struct ubifs_compressor *compr) if (compr->capi_name) { compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0); if (IS_ERR(compr->cc)) { - ubifs_err("cannot initialize compressor %s, error %ld", - compr->name, PTR_ERR(compr->cc)); + pr_err("UBIFS error (pid %d): cannot initialize compressor %s, error %ld", + current->pid, compr->name, PTR_ERR(compr->cc)); return PTR_ERR(compr->cc); } } diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 4cfb3e8..4c46a98 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -746,7 +746,7 @@ void ubifs_dump_lprops(struct ubifs_info *c) for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) { err = ubifs_read_one_lp(c, lnum, &lp); if (err) { - ubifs_err("cannot read lprops for LEB %d", lnum); + ubifs_err(c, "cannot read lprops for LEB %d", lnum); continue; } @@ -819,13 +819,13 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum) buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); if (!buf) { - ubifs_err("cannot allocate memory for dumping LEB %d", lnum); + ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum); return; } sleb = ubifs_scan(c, lnum, 0, buf, 0); if (IS_ERR(sleb)) { - ubifs_err("scan error %d", (int)PTR_ERR(sleb)); + ubifs_err(c, "scan error %d", (int)PTR_ERR(sleb)); goto out; } @@ -1032,7 +1032,7 @@ int dbg_check_space_info(struct ubifs_info *c) spin_unlock(&c->space_lock); if (free != d->saved_free) { - ubifs_err("free space changed from %lld to %lld", + ubifs_err(c, "free space changed from %lld to %lld", d->saved_free, free); goto out; } @@ -1040,15 +1040,15 @@ int 
dbg_check_space_info(struct ubifs_info *c) return 0; out: - ubifs_msg("saved lprops statistics dump"); + ubifs_msg(c, "saved lprops statistics dump"); ubifs_dump_lstats(&d->saved_lst); - ubifs_msg("saved budgeting info dump"); + ubifs_msg(c, "saved budgeting info dump"); ubifs_dump_budg(c, &d->saved_bi); - ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt); - ubifs_msg("current lprops statistics dump"); + ubifs_msg(c, "saved idx_gc_cnt %d", d->saved_idx_gc_cnt); + ubifs_msg(c, "current lprops statistics dump"); ubifs_get_lp_stats(c, &lst); ubifs_dump_lstats(&lst); - ubifs_msg("current budgeting info dump"); + ubifs_msg(c, "current budgeting info dump"); ubifs_dump_budg(c, &c->bi); dump_stack(); return -EINVAL; @@ -1077,9 +1077,9 @@ int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode) mutex_lock(&ui->ui_mutex); spin_lock(&ui->ui_lock); if (ui->ui_size != ui->synced_i_size && !ui->dirty) { - ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode is clean", + ubifs_err(c, "ui_size is %lld, synced_i_size is %lld, but inode is clean", ui->ui_size, ui->synced_i_size); - ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino, + ubifs_err(c, "i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino, inode->i_mode, i_size_read(inode)); dump_stack(); err = -EINVAL; @@ -1140,7 +1140,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) kfree(pdent); if (i_size_read(dir) != size) { - ubifs_err("directory inode %lu has size %llu, but calculated size is %llu", + ubifs_err(c, "directory inode %lu has size %llu, but calculated size is %llu", dir->i_ino, (unsigned long long)i_size_read(dir), (unsigned long long)size); ubifs_dump_inode(c, dir); @@ -1148,7 +1148,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) return -EINVAL; } if (dir->i_nlink != nlink) { - ubifs_err("directory inode %lu has nlink %u, but calculated nlink is %u", + ubifs_err(c, "directory inode %lu has nlink %u, but calculated nlink is %u", dir->i_ino, dir->i_nlink, nlink); ubifs_dump_inode(c, dir); dump_stack(); @@ -1207,10 +1207,10 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, err = 1; key_read(c, &dent1->key, &key); if (keys_cmp(c, &zbr1->key, &key)) { - ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, + ubifs_err(c, "1st entry at %d:%d has key %s", zbr1->lnum, zbr1->offs, dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); - ubifs_err("but it should have key %s according to tnc", + ubifs_err(c, "but it should have key %s according to tnc", dbg_snprintf_key(c, &zbr1->key, key_buf, DBG_KEY_BUF_LEN)); ubifs_dump_node(c, dent1); @@ -1219,10 +1219,10 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, key_read(c, &dent2->key, &key); if (keys_cmp(c, &zbr2->key, &key)) { - ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, + ubifs_err(c, "2nd entry at %d:%d has key %s", zbr1->lnum, zbr1->offs, dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); - ubifs_err("but it should have key %s according to tnc", + ubifs_err(c, "but it should have key %s according to tnc", dbg_snprintf_key(c, &zbr2->key, key_buf, DBG_KEY_BUF_LEN)); ubifs_dump_node(c, dent2); @@ -1238,14 +1238,14 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, goto out_free; } if (cmp == 0 && nlen1 == nlen2) - ubifs_err("2 xent/dent nodes with the same name"); + ubifs_err(c, "2 xent/dent nodes with the same name"); else - ubifs_err("bad order of colliding key %s", + ubifs_err(c, "bad order of colliding 
key %s", dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); - ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); + ubifs_msg(c, "first node at %d:%d\n", zbr1->lnum, zbr1->offs); ubifs_dump_node(c, dent1); - ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); + ubifs_msg(c, "second node at %d:%d\n", zbr2->lnum, zbr2->offs); ubifs_dump_node(c, dent2); out_free: @@ -1447,11 +1447,11 @@ static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr) return 0; out: - ubifs_err("failed, error %d", err); - ubifs_msg("dump of the znode"); + ubifs_err(c, "failed, error %d", err); + ubifs_msg(c, "dump of the znode"); ubifs_dump_znode(c, znode); if (zp) { - ubifs_msg("dump of the parent znode"); + ubifs_msg(c, "dump of the parent znode"); ubifs_dump_znode(c, zp); } dump_stack(); @@ -1518,9 +1518,9 @@ int dbg_check_tnc(struct ubifs_info *c, int extra) if (err < 0) return err; if (err) { - ubifs_msg("first znode"); + ubifs_msg(c, "first znode"); ubifs_dump_znode(c, prev); - ubifs_msg("second znode"); + ubifs_msg(c, "second znode"); ubifs_dump_znode(c, znode); return -EINVAL; } @@ -1529,13 +1529,13 @@ int dbg_check_tnc(struct ubifs_info *c, int extra) if (extra) { if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) { - ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld", + ubifs_err(c, "incorrect clean_zn_cnt %ld, calculated %ld", atomic_long_read(&c->clean_zn_cnt), clean_cnt); return -EINVAL; } if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) { - ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld", + ubifs_err(c, "incorrect dirty_zn_cnt %ld, calculated %ld", atomic_long_read(&c->dirty_zn_cnt), dirty_cnt); return -EINVAL; @@ -1608,7 +1608,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, if (znode_cb) { err = znode_cb(c, znode, priv); if (err) { - ubifs_err("znode checking function returned error %d", + ubifs_err(c, "znode checking function returned error %d", err); ubifs_dump_znode(c, znode); goto out_dump; @@ -1619,7 +1619,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, zbr = &znode->zbranch[idx]; err = leaf_cb(c, zbr, priv); if (err) { - ubifs_err("leaf checking function returned error %d, for leaf at LEB %d:%d", + ubifs_err(c, "leaf checking function returned error %d, for leaf at LEB %d:%d", err, zbr->lnum, zbr->offs); goto out_dump; } @@ -1675,7 +1675,7 @@ out_dump: zbr = &znode->parent->zbranch[znode->iip]; else zbr = &c->zroot; - ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs); + ubifs_msg(c, "dump of znode at LEB %d:%d", zbr->lnum, zbr->offs); ubifs_dump_znode(c, znode); out_unlock: mutex_unlock(&c->tnc_mutex); @@ -1722,12 +1722,12 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size) err = dbg_walk_index(c, NULL, add_size, &calc); if (err) { - ubifs_err("error %d while walking the index", err); + ubifs_err(c, "error %d while walking the index", err); return err; } if (calc != idx_size) { - ubifs_err("index size check failed: calculated size is %lld, should be %lld", + ubifs_err(c, "index size check failed: calculated size is %lld, should be %lld", calc, idx_size); dump_stack(); return -EINVAL; @@ -1814,7 +1814,7 @@ static struct fsck_inode *add_inode(struct ubifs_info *c, } if (inum > c->highest_inum) { - ubifs_err("too high inode number, max. is %lu", + ubifs_err(c, "too high inode number, max. 
is %lu", (unsigned long)c->highest_inum); return ERR_PTR(-EINVAL); } @@ -1921,17 +1921,17 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c, ino_key_init(c, &key, inum); err = ubifs_lookup_level0(c, &key, &znode, &n); if (!err) { - ubifs_err("inode %lu not found in index", (unsigned long)inum); + ubifs_err(c, "inode %lu not found in index", (unsigned long)inum); return ERR_PTR(-ENOENT); } else if (err < 0) { - ubifs_err("error %d while looking up inode %lu", + ubifs_err(c, "error %d while looking up inode %lu", err, (unsigned long)inum); return ERR_PTR(err); } zbr = &znode->zbranch[n]; if (zbr->len < UBIFS_INO_NODE_SZ) { - ubifs_err("bad node %lu node length %d", + ubifs_err(c, "bad node %lu node length %d", (unsigned long)inum, zbr->len); return ERR_PTR(-EINVAL); } @@ -1942,7 +1942,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c, err = ubifs_tnc_read_node(c, zbr, ino); if (err) { - ubifs_err("cannot read inode node at LEB %d:%d, error %d", + ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d", zbr->lnum, zbr->offs, err); kfree(ino); return ERR_PTR(err); @@ -1951,7 +1951,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c, fscki = add_inode(c, fsckd, ino); kfree(ino); if (IS_ERR(fscki)) { - ubifs_err("error %ld while adding inode %lu node", + ubifs_err(c, "error %ld while adding inode %lu node", PTR_ERR(fscki), (unsigned long)inum); return fscki; } @@ -1985,7 +1985,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, struct fsck_inode *fscki; if (zbr->len < UBIFS_CH_SZ) { - ubifs_err("bad leaf length %d (LEB %d:%d)", + ubifs_err(c, "bad leaf length %d (LEB %d:%d)", zbr->len, zbr->lnum, zbr->offs); return -EINVAL; } @@ -1996,7 +1996,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, err = ubifs_tnc_read_node(c, zbr, node); if (err) { - ubifs_err("cannot read leaf node at LEB %d:%d, error %d", + ubifs_err(c, "cannot read leaf node at LEB %d:%d, error %d", zbr->lnum, zbr->offs, err); goto out_free; } @@ -2006,7 +2006,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, fscki = add_inode(c, priv, node); if (IS_ERR(fscki)) { err = PTR_ERR(fscki); - ubifs_err("error %d while adding inode node", err); + ubifs_err(c, "error %d while adding inode node", err); goto out_dump; } goto out; @@ -2014,7 +2014,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY && type != UBIFS_DATA_KEY) { - ubifs_err("unexpected node type %d at LEB %d:%d", + ubifs_err(c, "unexpected node type %d at LEB %d:%d", type, zbr->lnum, zbr->offs); err = -EINVAL; goto out_free; @@ -2022,7 +2022,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, ch = node; if (le64_to_cpu(ch->sqnum) > c->max_sqnum) { - ubifs_err("too high sequence number, max. is %llu", + ubifs_err(c, "too high sequence number, max. 
is %llu", c->max_sqnum); err = -EINVAL; goto out_dump; @@ -2042,7 +2042,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, fscki = read_add_inode(c, priv, inum); if (IS_ERR(fscki)) { err = PTR_ERR(fscki); - ubifs_err("error %d while processing data node and trying to find inode node %lu", + ubifs_err(c, "error %d while processing data node and trying to find inode node %lu", err, (unsigned long)inum); goto out_dump; } @@ -2052,7 +2052,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, blk_offs <<= UBIFS_BLOCK_SHIFT; blk_offs += le32_to_cpu(dn->size); if (blk_offs > fscki->size) { - ubifs_err("data node at LEB %d:%d is not within inode size %lld", + ubifs_err(c, "data node at LEB %d:%d is not within inode size %lld", zbr->lnum, zbr->offs, fscki->size); err = -EINVAL; goto out_dump; @@ -2076,7 +2076,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, fscki = read_add_inode(c, priv, inum); if (IS_ERR(fscki)) { err = PTR_ERR(fscki); - ubifs_err("error %d while processing entry node and trying to find inode node %lu", + ubifs_err(c, "error %d while processing entry node and trying to find inode node %lu", err, (unsigned long)inum); goto out_dump; } @@ -2088,7 +2088,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, fscki1 = read_add_inode(c, priv, inum); if (IS_ERR(fscki1)) { err = PTR_ERR(fscki1); - ubifs_err("error %d while processing entry node and trying to find parent inode node %lu", + ubifs_err(c, "error %d while processing entry node and trying to find parent inode node %lu", err, (unsigned long)inum); goto out_dump; } @@ -2111,7 +2111,7 @@ out: return 0; out_dump: - ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs); + ubifs_msg(c, "dump of node at LEB %d:%d", zbr->lnum, zbr->offs); ubifs_dump_node(c, node); out_free: kfree(node); @@ -2162,52 +2162,52 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd) */ if (fscki->inum != UBIFS_ROOT_INO && fscki->references != 1) { - ubifs_err("directory inode %lu has %d direntries which refer it, but should be 1", + ubifs_err(c, "directory inode %lu has %d direntries which refer it, but should be 1", (unsigned long)fscki->inum, fscki->references); goto out_dump; } if (fscki->inum == UBIFS_ROOT_INO && fscki->references != 0) { - ubifs_err("root inode %lu has non-zero (%d) direntries which refer it", + ubifs_err(c, "root inode %lu has non-zero (%d) direntries which refer it", (unsigned long)fscki->inum, fscki->references); goto out_dump; } if (fscki->calc_sz != fscki->size) { - ubifs_err("directory inode %lu size is %lld, but calculated size is %lld", + ubifs_err(c, "directory inode %lu size is %lld, but calculated size is %lld", (unsigned long)fscki->inum, fscki->size, fscki->calc_sz); goto out_dump; } if (fscki->calc_cnt != fscki->nlink) { - ubifs_err("directory inode %lu nlink is %d, but calculated nlink is %d", + ubifs_err(c, "directory inode %lu nlink is %d, but calculated nlink is %d", (unsigned long)fscki->inum, fscki->nlink, fscki->calc_cnt); goto out_dump; } } else { if (fscki->references != fscki->nlink) { - ubifs_err("inode %lu nlink is %d, but calculated nlink is %d", + ubifs_err(c, "inode %lu nlink is %d, but calculated nlink is %d", (unsigned long)fscki->inum, fscki->nlink, fscki->references); goto out_dump; } } if (fscki->xattr_sz != fscki->calc_xsz) { - ubifs_err("inode %lu has xattr size %u, but calculated size is %lld", + ubifs_err(c, "inode %lu has xattr size %u, but calculated size is %lld", (unsigned 
long)fscki->inum, fscki->xattr_sz, fscki->calc_xsz); goto out_dump; } if (fscki->xattr_cnt != fscki->calc_xcnt) { - ubifs_err("inode %lu has %u xattrs, but calculated count is %lld", + ubifs_err(c, "inode %lu has %u xattrs, but calculated count is %lld", (unsigned long)fscki->inum, fscki->xattr_cnt, fscki->calc_xcnt); goto out_dump; } if (fscki->xattr_nms != fscki->calc_xnms) { - ubifs_err("inode %lu has xattr names' size %u, but calculated names' size is %lld", + ubifs_err(c, "inode %lu has xattr names' size %u, but calculated names' size is %lld", (unsigned long)fscki->inum, fscki->xattr_nms, fscki->calc_xnms); goto out_dump; @@ -2221,11 +2221,11 @@ out_dump: ino_key_init(c, &key, fscki->inum); err = ubifs_lookup_level0(c, &key, &znode, &n); if (!err) { - ubifs_err("inode %lu not found in index", + ubifs_err(c, "inode %lu not found in index", (unsigned long)fscki->inum); return -ENOENT; } else if (err < 0) { - ubifs_err("error %d while looking up inode %lu", + ubifs_err(c, "error %d while looking up inode %lu", err, (unsigned long)fscki->inum); return err; } @@ -2237,13 +2237,13 @@ out_dump: err = ubifs_tnc_read_node(c, zbr, ino); if (err) { - ubifs_err("cannot read inode node at LEB %d:%d, error %d", + ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d", zbr->lnum, zbr->offs, err); kfree(ino); return err; } - ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", + ubifs_msg(c, "dump of the inode %lu sitting in LEB %d:%d", (unsigned long)fscki->inum, zbr->lnum, zbr->offs); ubifs_dump_node(c, ino); kfree(ino); @@ -2284,7 +2284,7 @@ int dbg_check_filesystem(struct ubifs_info *c) return 0; out_free: - ubifs_err("file-system check failed with error %d", err); + ubifs_err(c, "file-system check failed with error %d", err); dump_stack(); free_inodes(&fsckd); return err; @@ -2315,12 +2315,12 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) sb = container_of(cur->next, struct ubifs_scan_node, list); if (sa->type != UBIFS_DATA_NODE) { - ubifs_err("bad node type %d", sa->type); + ubifs_err(c, "bad node type %d", sa->type); ubifs_dump_node(c, sa->node); return -EINVAL; } if (sb->type != UBIFS_DATA_NODE) { - ubifs_err("bad node type %d", sb->type); + ubifs_err(c, "bad node type %d", sb->type); ubifs_dump_node(c, sb->node); return -EINVAL; } @@ -2331,7 +2331,7 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) if (inuma < inumb) continue; if (inuma > inumb) { - ubifs_err("larger inum %lu goes before inum %lu", + ubifs_err(c, "larger inum %lu goes before inum %lu", (unsigned long)inuma, (unsigned long)inumb); goto error_dump; } @@ -2340,11 +2340,11 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) blkb = key_block(c, &sb->key); if (blka > blkb) { - ubifs_err("larger block %u goes before %u", blka, blkb); + ubifs_err(c, "larger block %u goes before %u", blka, blkb); goto error_dump; } if (blka == blkb) { - ubifs_err("two data nodes for the same block"); + ubifs_err(c, "two data nodes for the same block"); goto error_dump; } } @@ -2383,19 +2383,19 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE && sa->type != UBIFS_XENT_NODE) { - ubifs_err("bad node type %d", sa->type); + ubifs_err(c, "bad node type %d", sa->type); ubifs_dump_node(c, sa->node); return -EINVAL; } if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE && sa->type != UBIFS_XENT_NODE) { - ubifs_err("bad node type %d", sb->type); + 
ubifs_err(c, "bad node type %d", sb->type); ubifs_dump_node(c, sb->node); return -EINVAL; } if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) { - ubifs_err("non-inode node goes before inode node"); + ubifs_err(c, "non-inode node goes before inode node"); goto error_dump; } @@ -2405,7 +2405,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) { /* Inode nodes are sorted in descending size order */ if (sa->len < sb->len) { - ubifs_err("smaller inode node goes first"); + ubifs_err(c, "smaller inode node goes first"); goto error_dump; } continue; @@ -2421,7 +2421,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) if (inuma < inumb) continue; if (inuma > inumb) { - ubifs_err("larger inum %lu goes before inum %lu", + ubifs_err(c, "larger inum %lu goes before inum %lu", (unsigned long)inuma, (unsigned long)inumb); goto error_dump; } @@ -2430,7 +2430,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) hashb = key_block(c, &sb->key); if (hasha > hashb) { - ubifs_err("larger hash %u goes before %u", + ubifs_err(c, "larger hash %u goes before %u", hasha, hashb); goto error_dump; } @@ -2439,9 +2439,9 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) return 0; error_dump: - ubifs_msg("dumping first node"); + ubifs_msg(c, "dumping first node"); ubifs_dump_node(c, sa->node); - ubifs_msg("dumping second node"); + ubifs_msg(c, "dumping second node"); ubifs_dump_node(c, sb->node); return -EINVAL; return 0; @@ -2470,13 +2470,13 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) delay = prandom_u32() % 60000; d->pc_timeout = jiffies; d->pc_timeout += msecs_to_jiffies(delay); - ubifs_warn("failing after %lums", delay); + ubifs_warn(c, "failing after %lums", delay); } else { d->pc_delay = 2; delay = prandom_u32() % 10000; /* Fail within 10000 operations */ d->pc_cnt_max = delay; - ubifs_warn("failing after %lu calls", delay); + ubifs_warn(c, "failing after %lu calls", delay); } } @@ -2494,55 +2494,55 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) return 0; if (chance(19, 20)) return 0; - ubifs_warn("failing in super block LEB %d", lnum); + ubifs_warn(c, "failing in super block LEB %d", lnum); } else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) { if (chance(19, 20)) return 0; - ubifs_warn("failing in master LEB %d", lnum); + ubifs_warn(c, "failing in master LEB %d", lnum); } else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) { if (write && chance(99, 100)) return 0; if (chance(399, 400)) return 0; - ubifs_warn("failing in log LEB %d", lnum); + ubifs_warn(c, "failing in log LEB %d", lnum); } else if (lnum >= c->lpt_first && lnum <= c->lpt_last) { if (write && chance(7, 8)) return 0; if (chance(19, 20)) return 0; - ubifs_warn("failing in LPT LEB %d", lnum); + ubifs_warn(c, "failing in LPT LEB %d", lnum); } else if (lnum >= c->orph_first && lnum <= c->orph_last) { if (write && chance(1, 2)) return 0; if (chance(9, 10)) return 0; - ubifs_warn("failing in orphan LEB %d", lnum); + ubifs_warn(c, "failing in orphan LEB %d", lnum); } else if (lnum == c->ihead_lnum) { if (chance(99, 100)) return 0; - ubifs_warn("failing in index head LEB %d", lnum); + ubifs_warn(c, "failing in index head LEB %d", lnum); } else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) { if (chance(9, 10)) return 0; - ubifs_warn("failing in GC head LEB %d", lnum); + 
ubifs_warn(c, "failing in GC head LEB %d", lnum); } else if (write && !RB_EMPTY_ROOT(&c->buds) && !ubifs_search_bud(c, lnum)) { if (chance(19, 20)) return 0; - ubifs_warn("failing in non-bud LEB %d", lnum); + ubifs_warn(c, "failing in non-bud LEB %d", lnum); } else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND || c->cmt_state == COMMIT_RUNNING_REQUIRED) { if (chance(999, 1000)) return 0; - ubifs_warn("failing in bud LEB %d commit running", lnum); + ubifs_warn(c, "failing in bud LEB %d commit running", lnum); } else { if (chance(9999, 10000)) return 0; - ubifs_warn("failing in bud LEB %d commit not running", lnum); + ubifs_warn(c, "failing in bud LEB %d commit not running", lnum); } d->pc_happened = 1; - ubifs_warn("========== Power cut emulated =========="); + ubifs_warn(c, "========== Power cut emulated =========="); dump_stack(); return 1; } @@ -2557,7 +2557,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf, /* Corruption span max to end of write unit */ to = min(len, ALIGN(from + 1, c->max_write_size)); - ubifs_warn("filled bytes %u-%u with %s", from, to - 1, + ubifs_warn(c, "filled bytes %u-%u with %s", from, to - 1, ffs ? "0xFFs" : "random data"); if (ffs) @@ -2579,7 +2579,7 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, failing = power_cut_emulated(c, lnum, 1); if (failing) { len = corrupt_data(c, buf, len); - ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)", + ubifs_warn(c, "actually write %d bytes to LEB %d:%d (the buffer was corrupted)", len, lnum, offs); } err = ubi_leb_write(c->ubi, lnum, buf, offs, len); @@ -2909,7 +2909,7 @@ out_remove: debugfs_remove_recursive(d->dfs_dir); out: err = dent ? PTR_ERR(dent) : -ENODEV; - ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n", + ubifs_err(c, "cannot create \"%s\" debugfs file or directory, error %d\n", fname, err); return err; } @@ -3063,8 +3063,8 @@ out_remove: debugfs_remove_recursive(dfs_rootdir); out: err = dent ? PTR_ERR(dent) : -ENODEV; - ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n", - fname, err); + pr_err("UBIFS error (pid %d): cannot create \"%s\" debugfs file or directory, error %d\n", + current->pid, fname, err); return err; } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 0fa6c80..27060fc 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -146,12 +146,12 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, if (c->highest_inum >= INUM_WARN_WATERMARK) { if (c->highest_inum >= INUM_WATERMARK) { spin_unlock(&c->cnt_lock); - ubifs_err("out of inode numbers"); + ubifs_err(c, "out of inode numbers"); make_bad_inode(inode); iput(inode); return ERR_PTR(-EINVAL); } - ubifs_warn("running out of inode numbers (current %lu, max %d)", + ubifs_warn(c, "running out of inode numbers (current %lu, max %u)", (unsigned long)c->highest_inum, INUM_WATERMARK); } @@ -222,7 +222,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, * checking. 
*/ err = PTR_ERR(inode); - ubifs_err("dead directory entry '%pd', error %d", + ubifs_err(c, "dead directory entry '%pd', error %d", dentry, err); ubifs_ro_mode(c, err); goto out; @@ -272,7 +272,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode, err = ubifs_init_security(dir, inode, &dentry->d_name); if (err) - goto out_cancel; + goto out_inode; mutex_lock(&dir_ui->ui_mutex); dir->i_size += sz_change; @@ -292,11 +292,12 @@ out_cancel: dir->i_size -= sz_change; dir_ui->ui_size = dir->i_size; mutex_unlock(&dir_ui->ui_mutex); +out_inode: make_bad_inode(inode); iput(inode); out_budg: ubifs_release_budget(c, &req); - ubifs_err("cannot create regular file, error %d", err); + ubifs_err(c, "cannot create regular file, error %d", err); return err; } @@ -449,7 +450,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) out: if (err != -ENOENT) { - ubifs_err("cannot find next direntry, error %d", err); + ubifs_err(c, "cannot find next direntry, error %d", err); return err; } @@ -498,7 +499,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct ubifs_info *c = dir->i_sb->s_fs_info; - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct ubifs_inode *ui = ubifs_inode(inode); struct ubifs_inode *dir_ui = ubifs_inode(dir); int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len); @@ -553,7 +554,7 @@ out_cancel: static int ubifs_unlink(struct inode *dir, struct dentry *dentry) { struct ubifs_info *c = dir->i_sb->s_fs_info; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ubifs_inode *dir_ui = ubifs_inode(dir); int sz_change = CALC_DENT_SIZE(dentry->d_name.len); int err, budgeted = 1; @@ -645,7 +646,7 @@ static int check_dir_empty(struct ubifs_info *c, struct inode *dir) static int ubifs_rmdir(struct inode *dir, struct dentry *dentry) { struct ubifs_info *c = dir->i_sb->s_fs_info; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int sz_change = CALC_DENT_SIZE(dentry->d_name.len); int err, budgeted = 1; struct ubifs_inode *dir_ui = ubifs_inode(dir); @@ -661,7 +662,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry) inode->i_ino, dir->i_ino); ubifs_assert(mutex_is_locked(&dir->i_mutex)); ubifs_assert(mutex_is_locked(&inode->i_mutex)); - err = check_dir_empty(c, dentry->d_inode); + err = check_dir_empty(c, d_inode(dentry)); if (err) return err; @@ -732,7 +733,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) err = ubifs_init_security(dir, inode, &dentry->d_name); if (err) - goto out_cancel; + goto out_inode; mutex_lock(&dir_ui->ui_mutex); insert_inode_hash(inode); @@ -743,7 +744,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) dir->i_mtime = dir->i_ctime = inode->i_ctime; err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0); if (err) { - ubifs_err("cannot create directory, error %d", err); + ubifs_err(c, "cannot create directory, error %d", err); goto out_cancel; } mutex_unlock(&dir_ui->ui_mutex); @@ -757,6 +758,7 @@ out_cancel: dir_ui->ui_size = dir->i_size; drop_nlink(dir); mutex_unlock(&dir_ui->ui_mutex); +out_inode: make_bad_inode(inode); iput(inode); out_budg: @@ -816,7 +818,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry, err = ubifs_init_security(dir, inode, &dentry->d_name); if (err) - goto out_cancel; + goto out_inode; mutex_lock(&dir_ui->ui_mutex); dir->i_size += sz_change; 
@@ -836,6 +838,7 @@ out_cancel: dir->i_size -= sz_change; dir_ui->ui_size = dir->i_size; mutex_unlock(&dir_ui->ui_mutex); +out_inode: make_bad_inode(inode); iput(inode); out_budg: @@ -896,7 +899,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, err = ubifs_init_security(dir, inode, &dentry->d_name); if (err) - goto out_cancel; + goto out_inode; mutex_lock(&dir_ui->ui_mutex); dir->i_size += sz_change; @@ -967,8 +970,8 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct ubifs_info *c = old_dir->i_sb->s_fs_info; - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct ubifs_inode *old_inode_ui = ubifs_inode(old_inode); int err, release, sync = 0, move = (new_dir != old_dir); int is_dir = S_ISDIR(old_inode->i_mode); @@ -1133,7 +1136,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { loff_t size; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ubifs_inode *ui = ubifs_inode(inode); mutex_lock(&ui->ui_mutex); diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index e627c0a..35efc10 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -50,7 +50,6 @@ */ #include "ubifs.h" -#include <linux/aio.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/slab.h> @@ -80,7 +79,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; out_len = UBIFS_BLOCK_SIZE; - err = ubifs_decompress(&dn->data, dlen, addr, &out_len, + err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, le16_to_cpu(dn->compr_type)); if (err || len != out_len) goto dump; @@ -96,7 +95,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, return 0; dump: - ubifs_err("bad data node (block %u, inode %lu)", + ubifs_err(c, "bad data node (block %u, inode %lu)", block, inode->i_ino); ubifs_dump_node(c, dn); return -EINVAL; @@ -161,13 +160,14 @@ static int do_readpage(struct page *page) addr += UBIFS_BLOCK_SIZE; } if (err) { + struct ubifs_info *c = inode->i_sb->s_fs_info; if (err == -ENOENT) { /* Not found, so it must be a hole */ SetPageChecked(page); dbg_gen("hole"); goto out_free; } - ubifs_err("cannot read page %lu of inode %lu, error %d", + ubifs_err(c, "cannot read page %lu of inode %lu, error %d", page->index, inode->i_ino, err); goto error; } @@ -650,7 +650,7 @@ static int populate_page(struct ubifs_info *c, struct page *page, dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; out_len = UBIFS_BLOCK_SIZE; - err = ubifs_decompress(&dn->data, dlen, addr, &out_len, + err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, le16_to_cpu(dn->compr_type)); if (err || len != out_len) goto out_err; @@ -698,7 +698,7 @@ out_err: SetPageError(page); flush_dcache_page(page); kunmap(page); - ubifs_err("bad data node (block %u, inode %lu)", + ubifs_err(c, "bad data node (block %u, inode %lu)", page_block, inode->i_ino); return -EINVAL; } @@ -802,7 +802,7 @@ out_free: return ret; out_warn: - ubifs_warn("ignoring error %d and skipping bulk-read", err); + ubifs_warn(c, "ignoring error %d and skipping bulk-read", err); goto out_free; out_bu_off: @@ -930,7 +930,7 @@ static int do_writepage(struct page *page, int len) } if (err) { SetPageError(page); - ubifs_err("cannot write page %lu of inode %lu, error %d", + 
ubifs_err(c, "cannot write page %lu of inode %lu, error %d", page->index, inode->i_ino, err); ubifs_ro_mode(c, err); } @@ -1257,7 +1257,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode, int ubifs_setattr(struct dentry *dentry, struct iattr *attr) { int err; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct ubifs_info *c = inode->i_sb->s_fs_info; dbg_gen("ino %lu, mode %#x, ia_valid %#x", @@ -1302,7 +1302,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset, static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct ubifs_inode *ui = ubifs_inode(dentry->d_inode); + struct ubifs_inode *ui = ubifs_inode(d_inode(dentry)); nd_set_link(nd, ui->data); return NULL; @@ -1485,7 +1485,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, err = ubifs_budget_space(c, &req); if (unlikely(err)) { if (err == -ENOSPC) - ubifs_warn("out of space for mmapped file (inode number %lu)", + ubifs_warn(c, "out of space for mmapped file (inode number %lu)", inode->i_ino); return VM_FAULT_SIGBUS; } @@ -1581,8 +1581,6 @@ const struct inode_operations ubifs_symlink_inode_operations = { const struct file_operations ubifs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = ubifs_write_iter, .mmap = ubifs_file_mmap, diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index fb08b0c..97be412 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -85,7 +85,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) c->ro_error = 1; c->no_chk_data_crc = 0; c->vfs_sb->s_flags |= MS_RDONLY; - ubifs_warn("switched to read-only mode, error %d", err); + ubifs_warn(c, "switched to read-only mode, error %d", err); dump_stack(); } } @@ -107,7 +107,7 @@ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, * @even_ebadmsg is true. 
*/ if (err && (err != -EBADMSG || even_ebadmsg)) { - ubifs_err("reading %d bytes from LEB %d:%d failed, error %d", + ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d", len, lnum, offs, err); dump_stack(); } @@ -127,7 +127,7 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, else err = dbg_leb_write(c, lnum, buf, offs, len); if (err) { - ubifs_err("writing %d bytes to LEB %d:%d failed, error %d", + ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d", len, lnum, offs, err); ubifs_ro_mode(c, err); dump_stack(); @@ -147,7 +147,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) else err = dbg_leb_change(c, lnum, buf, len); if (err) { - ubifs_err("changing %d bytes in LEB %d failed, error %d", + ubifs_err(c, "changing %d bytes in LEB %d failed, error %d", len, lnum, err); ubifs_ro_mode(c, err); dump_stack(); @@ -167,7 +167,7 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum) else err = dbg_leb_unmap(c, lnum); if (err) { - ubifs_err("unmap LEB %d failed, error %d", lnum, err); + ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err); ubifs_ro_mode(c, err); dump_stack(); } @@ -186,7 +186,7 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum) else err = dbg_leb_map(c, lnum); if (err) { - ubifs_err("mapping LEB %d failed, error %d", lnum, err); + ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err); ubifs_ro_mode(c, err); dump_stack(); } @@ -199,7 +199,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) err = ubi_is_mapped(c->ubi, lnum); if (err < 0) { - ubifs_err("ubi_is_mapped failed for LEB %d, error %d", + ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d", lnum, err); dump_stack(); } @@ -247,7 +247,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, magic = le32_to_cpu(ch->magic); if (magic != UBIFS_NODE_MAGIC) { if (!quiet) - ubifs_err("bad magic %#08x, expected %#08x", + ubifs_err(c, "bad magic %#08x, expected %#08x", magic, UBIFS_NODE_MAGIC); err = -EUCLEAN; goto out; @@ -256,7 +256,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, type = ch->node_type; if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) { if (!quiet) - ubifs_err("bad node type %d", type); + ubifs_err(c, "bad node type %d", type); goto out; } @@ -279,7 +279,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, node_crc = le32_to_cpu(ch->crc); if (crc != node_crc) { if (!quiet) - ubifs_err("bad CRC: calculated %#08x, read %#08x", + ubifs_err(c, "bad CRC: calculated %#08x, read %#08x", crc, node_crc); err = -EUCLEAN; goto out; @@ -289,10 +289,10 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, out_len: if (!quiet) - ubifs_err("bad node length %d", node_len); + ubifs_err(c, "bad node length %d", node_len); out: if (!quiet) { - ubifs_err("bad node at LEB %d:%d", lnum, offs); + ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); ubifs_dump_node(c, buf); dump_stack(); } @@ -355,11 +355,11 @@ static unsigned long long next_sqnum(struct ubifs_info *c) if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) { if (sqnum >= SQNUM_WATERMARK) { - ubifs_err("sequence number overflow %llu, end of life", + ubifs_err(c, "sequence number overflow %llu, end of life", sqnum); ubifs_ro_mode(c, -EINVAL); } - ubifs_warn("running out of sequence numbers, end of life soon"); + ubifs_warn(c, "running out of sequence numbers, end of life soon"); } return sqnum; @@ -636,7 +636,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c) err = 
ubifs_wbuf_sync_nolock(wbuf); mutex_unlock(&wbuf->io_mutex); if (err) { - ubifs_err("cannot sync write-buffer, error %d", err); + ubifs_err(c, "cannot sync write-buffer, error %d", err); ubifs_ro_mode(c, err); goto out_timers; } @@ -833,7 +833,7 @@ exit: return 0; out: - ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", + ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d", len, wbuf->lnum, wbuf->offs, err); ubifs_dump_node(c, buf); dump_stack(); @@ -932,27 +932,27 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, } if (type != ch->node_type) { - ubifs_err("bad node type (%d but expected %d)", + ubifs_err(c, "bad node type (%d but expected %d)", ch->node_type, type); goto out; } err = ubifs_check_node(c, buf, lnum, offs, 0, 0); if (err) { - ubifs_err("expected node type %d", type); + ubifs_err(c, "expected node type %d", type); return err; } rlen = le32_to_cpu(ch->len); if (rlen != len) { - ubifs_err("bad node length %d, expected %d", rlen, len); + ubifs_err(c, "bad node length %d, expected %d", rlen, len); goto out; } return 0; out: - ubifs_err("bad node at LEB %d:%d", lnum, offs); + ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); ubifs_dump_node(c, buf); dump_stack(); return -EINVAL; diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c index 648b143..3c7b29d 100644 --- a/fs/ubifs/ioctl.c +++ b/fs/ubifs/ioctl.c @@ -138,7 +138,7 @@ static int setflags(struct inode *inode, int flags) return err; out_unlock: - ubifs_err("can't modify inode %lu attributes", inode->i_ino); + ubifs_err(c, "can't modify inode %lu attributes", inode->i_ino); mutex_unlock(&ui->ui_mutex); ubifs_release_budget(c, &req); return err; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index f6ac3f2..0b9da5b 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -363,11 +363,11 @@ again: * This should not happen unless the journal size limitations * are too tough. 
*/ - ubifs_err("stuck in space allocation"); + ubifs_err(c, "stuck in space allocation"); err = -ENOSPC; goto out; } else if (cmt_retries > 32) - ubifs_warn("too many space allocation re-tries (%d)", + ubifs_warn(c, "too many space allocation re-tries (%d)", cmt_retries); dbg_jnl("-EAGAIN, commit and retry (retried %d times)", @@ -380,7 +380,7 @@ again: goto again; out: - ubifs_err("cannot reserve %d bytes in jhead %d, error %d", + ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d", len, jhead, err); if (err == -ENOSPC) { /* This are some budgeting problems, print useful information */ @@ -731,7 +731,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, compr_type = ui->compr_type; out_len = dlen - UBIFS_DATA_NODE_SZ; - ubifs_compress(buf, len, &data->data, &out_len, &compr_type); + ubifs_compress(c, buf, len, &data->data, &out_len, &compr_type); ubifs_assert(out_len <= UBIFS_BLOCK_SIZE); dlen = UBIFS_DATA_NODE_SZ + out_len; @@ -930,8 +930,8 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, union ubifs_key key; struct ubifs_dent_node *dent, *dent2; int err, dlen1, dlen2, ilen, lnum, offs, len; - const struct inode *old_inode = old_dentry->d_inode; - const struct inode *new_inode = new_dentry->d_inode; + const struct inode *old_inode = d_inode(old_dentry); + const struct inode *new_inode = d_inode(new_dentry); int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ; int last_reference = !!(new_inode && new_inode->i_nlink == 0); int move = (old_dir != new_dir); @@ -1100,7 +1100,8 @@ out_free: * This function is used when an inode is truncated and the last data node of * the inode has to be re-compressed and re-written. */ -static int recomp_data_node(struct ubifs_data_node *dn, int *new_len) +static int recomp_data_node(const struct ubifs_info *c, + struct ubifs_data_node *dn, int *new_len) { void *buf; int err, len, compr_type, out_len; @@ -1112,11 +1113,11 @@ static int recomp_data_node(struct ubifs_data_node *dn, int *new_len) len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; compr_type = le16_to_cpu(dn->compr_type); - err = ubifs_decompress(&dn->data, len, buf, &out_len, compr_type); + err = ubifs_decompress(c, &dn->data, len, buf, &out_len, compr_type); if (err) goto out; - ubifs_compress(buf, *new_len, &dn->data, &out_len, &compr_type); + ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type); ubifs_assert(out_len <= UBIFS_BLOCK_SIZE); dn->compr_type = cpu_to_le16(compr_type); dn->size = cpu_to_le32(*new_len); @@ -1191,7 +1192,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, int compr_type = le16_to_cpu(dn->compr_type); if (compr_type != UBIFS_COMPR_NONE) { - err = recomp_data_node(dn, &dlen); + err = recomp_data_node(c, dn, &dlen); if (err) goto out_free; } else { diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index c14628f..8c795e6 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c @@ -696,7 +696,7 @@ int ubifs_consolidate_log(struct ubifs_info *c) destroy_done_tree(&done_tree); vfree(buf); if (write_lnum == c->lhead_lnum) { - ubifs_err("log is too full"); + ubifs_err(c, "log is too full"); return -EINVAL; } /* Unmap remaining LEBs */ @@ -743,7 +743,7 @@ static int dbg_check_bud_bytes(struct ubifs_info *c) bud_bytes += c->leb_size - bud->start; if (c->bud_bytes != bud_bytes) { - ubifs_err("bad bud_bytes %lld, calculated %lld", + ubifs_err(c, "bad bud_bytes %lld, calculated %lld", c->bud_bytes, bud_bytes); err = -EINVAL; } diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 
46190a7..a0011aa 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -682,7 +682,7 @@ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, out: ubifs_release_lprops(c); if (err) - ubifs_err("cannot change properties of LEB %d, error %d", + ubifs_err(c, "cannot change properties of LEB %d, error %d", lnum, err); return err; } @@ -721,7 +721,7 @@ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, out: ubifs_release_lprops(c); if (err) - ubifs_err("cannot update properties of LEB %d, error %d", + ubifs_err(c, "cannot update properties of LEB %d, error %d", lnum, err); return err; } @@ -746,7 +746,7 @@ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp) lpp = ubifs_lpt_lookup(c, lnum); if (IS_ERR(lpp)) { err = PTR_ERR(lpp); - ubifs_err("cannot read properties of LEB %d, error %d", + ubifs_err(c, "cannot read properties of LEB %d, error %d", lnum, err); goto out; } @@ -873,13 +873,13 @@ int dbg_check_cats(struct ubifs_info *c) list_for_each_entry(lprops, &c->empty_list, list) { if (lprops->free != c->leb_size) { - ubifs_err("non-empty LEB %d on empty list (free %d dirty %d flags %d)", + ubifs_err(c, "non-empty LEB %d on empty list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { - ubifs_err("taken LEB %d on empty list (free %d dirty %d flags %d)", + ubifs_err(c, "taken LEB %d on empty list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; @@ -889,13 +889,13 @@ int dbg_check_cats(struct ubifs_info *c) i = 0; list_for_each_entry(lprops, &c->freeable_list, list) { if (lprops->free + lprops->dirty != c->leb_size) { - ubifs_err("non-freeable LEB %d on freeable list (free %d dirty %d flags %d)", + ubifs_err(c, "non-freeable LEB %d on freeable list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { - ubifs_err("taken LEB %d on freeable list (free %d dirty %d flags %d)", + ubifs_err(c, "taken LEB %d on freeable list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; @@ -903,7 +903,7 @@ int dbg_check_cats(struct ubifs_info *c) i += 1; } if (i != c->freeable_cnt) { - ubifs_err("freeable list count %d expected %d", i, + ubifs_err(c, "freeable list count %d expected %d", i, c->freeable_cnt); return -EINVAL; } @@ -912,26 +912,26 @@ int dbg_check_cats(struct ubifs_info *c) list_for_each(pos, &c->idx_gc) i += 1; if (i != c->idx_gc_cnt) { - ubifs_err("idx_gc list count %d expected %d", i, + ubifs_err(c, "idx_gc list count %d expected %d", i, c->idx_gc_cnt); return -EINVAL; } list_for_each_entry(lprops, &c->frdi_idx_list, list) { if (lprops->free + lprops->dirty != c->leb_size) { - ubifs_err("non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)", + ubifs_err(c, "non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { - ubifs_err("taken LEB %d on frdi_idx list (free %d dirty %d flags %d)", + ubifs_err(c, "taken LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (!(lprops->flags & LPROPS_INDEX)) { - ubifs_err("non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)", + ubifs_err(c, "non-index LEB %d on frdi_idx list (free 
%d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; @@ -944,15 +944,15 @@ int dbg_check_cats(struct ubifs_info *c) for (i = 0; i < heap->cnt; i++) { lprops = heap->arr[i]; if (!lprops) { - ubifs_err("null ptr in LPT heap cat %d", cat); + ubifs_err(c, "null ptr in LPT heap cat %d", cat); return -EINVAL; } if (lprops->hpos != i) { - ubifs_err("bad ptr in LPT heap cat %d", cat); + ubifs_err(c, "bad ptr in LPT heap cat %d", cat); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { - ubifs_err("taken LEB in LPT heap cat %d", cat); + ubifs_err(c, "taken LEB in LPT heap cat %d", cat); return -EINVAL; } } @@ -988,7 +988,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, goto out; } if (lprops != lp) { - ubifs_err("lprops %zx lp %zx lprops->lnum %d lp->lnum %d", + ubifs_err(c, "lprops %zx lp %zx lprops->lnum %d lp->lnum %d", (size_t)lprops, (size_t)lp, lprops->lnum, lp->lnum); err = 4; @@ -1008,7 +1008,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, } out: if (err) { - ubifs_err("failed cat %d hpos %d err %d", cat, i, err); + ubifs_err(c, "failed cat %d hpos %d err %d", cat, i, err); dump_stack(); ubifs_dump_heap(c, heap, cat); } @@ -1039,7 +1039,7 @@ static int scan_check_cb(struct ubifs_info *c, if (cat != LPROPS_UNCAT) { cat = ubifs_categorize_lprops(c, lp); if (cat != (lp->flags & LPROPS_CAT_MASK)) { - ubifs_err("bad LEB category %d expected %d", + ubifs_err(c, "bad LEB category %d expected %d", (lp->flags & LPROPS_CAT_MASK), cat); return -EINVAL; } @@ -1074,7 +1074,7 @@ static int scan_check_cb(struct ubifs_info *c, } } if (!found) { - ubifs_err("bad LPT list (category %d)", cat); + ubifs_err(c, "bad LPT list (category %d)", cat); return -EINVAL; } } @@ -1086,7 +1086,7 @@ static int scan_check_cb(struct ubifs_info *c, if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) || lp != heap->arr[lp->hpos]) { - ubifs_err("bad LPT heap (category %d)", cat); + ubifs_err(c, "bad LPT heap (category %d)", cat); return -EINVAL; } } @@ -1133,7 +1133,7 @@ static int scan_check_cb(struct ubifs_info *c, is_idx = (snod->type == UBIFS_IDX_NODE) ? 
1 : 0; if (is_idx && snod->type != UBIFS_IDX_NODE) { - ubifs_err("indexing node in data LEB %d:%d", + ubifs_err(c, "indexing node in data LEB %d:%d", lnum, snod->offs); goto out_destroy; } @@ -1159,7 +1159,7 @@ static int scan_check_cb(struct ubifs_info *c, if (free > c->leb_size || free < 0 || dirty > c->leb_size || dirty < 0) { - ubifs_err("bad calculated accounting for LEB %d: free %d, dirty %d", + ubifs_err(c, "bad calculated accounting for LEB %d: free %d, dirty %d", lnum, free, dirty); goto out_destroy; } @@ -1206,13 +1206,13 @@ static int scan_check_cb(struct ubifs_info *c, /* Free but not unmapped LEB, it's fine */ is_idx = 0; else { - ubifs_err("indexing node without indexing flag"); + ubifs_err(c, "indexing node without indexing flag"); goto out_print; } } if (!is_idx && (lp->flags & LPROPS_INDEX)) { - ubifs_err("data node with indexing flag"); + ubifs_err(c, "data node with indexing flag"); goto out_print; } @@ -1241,7 +1241,7 @@ static int scan_check_cb(struct ubifs_info *c, return LPT_SCAN_CONTINUE; out_print: - ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d", + ubifs_err(c, "bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d", lnum, lp->free, lp->dirty, lp->flags, free, dirty); ubifs_dump_leb(c, lnum); out_destroy: @@ -1293,11 +1293,11 @@ int dbg_check_lprops(struct ubifs_info *c) lst.total_free != c->lst.total_free || lst.total_dirty != c->lst.total_dirty || lst.total_used != c->lst.total_used) { - ubifs_err("bad overall accounting"); - ubifs_err("calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", + ubifs_err(c, "bad overall accounting"); + ubifs_err(c, "calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", lst.empty_lebs, lst.idx_lebs, lst.total_free, lst.total_dirty, lst.total_used); - ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", + ubifs_err(c, "read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free, c->lst.total_dirty, c->lst.total_used); err = -EINVAL; @@ -1306,10 +1306,10 @@ int dbg_check_lprops(struct ubifs_info *c) if (lst.total_dead != c->lst.total_dead || lst.total_dark != c->lst.total_dark) { - ubifs_err("bad dead/dark space accounting"); - ubifs_err("calculated: total_dead %lld, total_dark %lld", + ubifs_err(c, "bad dead/dark space accounting"); + ubifs_err(c, "calculated: total_dead %lld, total_dark %lld", lst.total_dead, lst.total_dark); - ubifs_err("read from lprops: total_dead %lld, total_dark %lld", + ubifs_err(c, "read from lprops: total_dead %lld, total_dark %lld", c->lst.total_dead, c->lst.total_dark); err = -EINVAL; goto out; diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 421bd0a..dc9f27e 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -145,13 +145,13 @@ int ubifs_calc_lpt_geom(struct ubifs_info *c) sz = c->lpt_sz * 2; /* Must have at least 2 times the size */ lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); if (lebs_needed > c->lpt_lebs) { - ubifs_err("too few LPT LEBs"); + ubifs_err(c, "too few LPT LEBs"); return -EINVAL; } /* Verify that ltab fits in a single LEB (since ltab is a single node */ if (c->ltab_sz > c->leb_size) { - ubifs_err("LPT ltab too big"); + ubifs_err(c, "LPT ltab too big"); return -EINVAL; } @@ -213,7 +213,7 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs, continue; } if 
(c->ltab_sz > c->leb_size) { - ubifs_err("LPT ltab too big"); + ubifs_err(c, "LPT ltab too big"); return -EINVAL; } *main_lebs = c->main_lebs; @@ -911,7 +911,7 @@ static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode, * * This function returns %0 on success and a negative error code on failure. */ -static int check_lpt_crc(void *buf, int len) +static int check_lpt_crc(const struct ubifs_info *c, void *buf, int len) { int pos = 0; uint8_t *addr = buf; @@ -921,8 +921,8 @@ static int check_lpt_crc(void *buf, int len) calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, len - UBIFS_LPT_CRC_BYTES); if (crc != calc_crc) { - ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc, - calc_crc); + ubifs_err(c, "invalid crc in LPT node: crc %hx calc %hx", + crc, calc_crc); dump_stack(); return -EINVAL; } @@ -938,14 +938,15 @@ static int check_lpt_crc(void *buf, int len) * * This function returns %0 on success and a negative error code on failure. */ -static int check_lpt_type(uint8_t **addr, int *pos, int type) +static int check_lpt_type(const struct ubifs_info *c, uint8_t **addr, + int *pos, int type) { int node_type; node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS); if (node_type != type) { - ubifs_err("invalid type (%d) in LPT node type %d", node_type, - type); + ubifs_err(c, "invalid type (%d) in LPT node type %d", + node_type, type); dump_stack(); return -EINVAL; } @@ -966,7 +967,7 @@ static int unpack_pnode(const struct ubifs_info *c, void *buf, uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; - err = check_lpt_type(&addr, &pos, UBIFS_LPT_PNODE); + err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_PNODE); if (err) return err; if (c->big_lpt) @@ -985,7 +986,7 @@ static int unpack_pnode(const struct ubifs_info *c, void *buf, lprops->flags = 0; lprops->flags |= ubifs_categorize_lprops(c, lprops); } - err = check_lpt_crc(buf, c->pnode_sz); + err = check_lpt_crc(c, buf, c->pnode_sz); return err; } @@ -1003,7 +1004,7 @@ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; - err = check_lpt_type(&addr, &pos, UBIFS_LPT_NNODE); + err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_NNODE); if (err) return err; if (c->big_lpt) @@ -1019,7 +1020,7 @@ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos, c->lpt_offs_bits); } - err = check_lpt_crc(buf, c->nnode_sz); + err = check_lpt_crc(c, buf, c->nnode_sz); return err; } @@ -1035,7 +1036,7 @@ static int unpack_ltab(const struct ubifs_info *c, void *buf) uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; - err = check_lpt_type(&addr, &pos, UBIFS_LPT_LTAB); + err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LTAB); if (err) return err; for (i = 0; i < c->lpt_lebs; i++) { @@ -1051,7 +1052,7 @@ static int unpack_ltab(const struct ubifs_info *c, void *buf) c->ltab[i].tgc = 0; c->ltab[i].cmt = 0; } - err = check_lpt_crc(buf, c->ltab_sz); + err = check_lpt_crc(c, buf, c->ltab_sz); return err; } @@ -1067,7 +1068,7 @@ static int unpack_lsave(const struct ubifs_info *c, void *buf) uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; - err = check_lpt_type(&addr, &pos, UBIFS_LPT_LSAVE); + err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LSAVE); if (err) return err; for (i = 0; i < c->lsave_cnt; i++) { @@ -1077,7 +1078,7 @@ static int unpack_lsave(const struct ubifs_info *c, void *buf) return -EINVAL; c->lsave[i] = lnum; } - err = check_lpt_crc(buf, c->lsave_sz); + err = 
check_lpt_crc(c, buf, c->lsave_sz); return err; } @@ -1243,7 +1244,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) return 0; out: - ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs); + ubifs_err(c, "error %d reading nnode at %d:%d", err, lnum, offs); dump_stack(); kfree(nnode); return err; @@ -1308,10 +1309,10 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) return 0; out: - ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs); + ubifs_err(c, "error %d reading pnode at %d:%d", err, lnum, offs); ubifs_dump_pnode(c, pnode, parent, iip); dump_stack(); - ubifs_err("calc num: %d", calc_pnode_num_from_parent(c, parent, iip)); + ubifs_err(c, "calc num: %d", calc_pnode_num_from_parent(c, parent, iip)); kfree(pnode); return err; } @@ -2095,7 +2096,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, int i; if (pnode->num != col) { - ubifs_err("pnode num %d expected %d parent num %d iip %d", + ubifs_err(c, "pnode num %d expected %d parent num %d iip %d", pnode->num, col, pnode->parent->num, pnode->iip); return -EINVAL; } @@ -2110,13 +2111,13 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, if (lnum >= c->leb_cnt) continue; if (lprops->lnum != lnum) { - ubifs_err("bad LEB number %d expected %d", + ubifs_err(c, "bad LEB number %d expected %d", lprops->lnum, lnum); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { if (cat != LPROPS_UNCAT) { - ubifs_err("LEB %d taken but not uncat %d", + ubifs_err(c, "LEB %d taken but not uncat %d", lprops->lnum, cat); return -EINVAL; } @@ -2129,7 +2130,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, case LPROPS_FRDI_IDX: break; default: - ubifs_err("LEB %d index but cat %d", + ubifs_err(c, "LEB %d index but cat %d", lprops->lnum, cat); return -EINVAL; } @@ -2142,7 +2143,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, case LPROPS_FREEABLE: break; default: - ubifs_err("LEB %d not index but cat %d", + ubifs_err(c, "LEB %d not index but cat %d", lprops->lnum, cat); return -EINVAL; } @@ -2183,14 +2184,14 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, break; } if (!found) { - ubifs_err("LEB %d cat %d not found in cat heap/list", + ubifs_err(c, "LEB %d cat %d not found in cat heap/list", lprops->lnum, cat); return -EINVAL; } switch (cat) { case LPROPS_EMPTY: if (lprops->free != c->leb_size) { - ubifs_err("LEB %d cat %d free %d dirty %d", + ubifs_err(c, "LEB %d cat %d free %d dirty %d", lprops->lnum, cat, lprops->free, lprops->dirty); return -EINVAL; @@ -2199,7 +2200,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, case LPROPS_FREEABLE: case LPROPS_FRDI_IDX: if (lprops->free + lprops->dirty != c->leb_size) { - ubifs_err("LEB %d cat %d free %d dirty %d", + ubifs_err(c, "LEB %d cat %d free %d dirty %d", lprops->lnum, cat, lprops->free, lprops->dirty); return -EINVAL; @@ -2236,7 +2237,7 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, /* cnode is a nnode */ num = calc_nnode_num(row, col); if (cnode->num != num) { - ubifs_err("nnode num %d expected %d parent num %d iip %d", + ubifs_err(c, "nnode num %d expected %d parent num %d iip %d", cnode->num, num, (nnode ? 
nnode->num : 0), cnode->iip); return -EINVAL; diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index d9c0292..ce89bdc 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -319,7 +319,7 @@ static int layout_cnodes(struct ubifs_info *c) return 0; no_space: - ubifs_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d", + ubifs_err(c, "LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); ubifs_dump_lpt_info(c); ubifs_dump_lpt_lebs(c); @@ -543,7 +543,7 @@ static int write_cnodes(struct ubifs_info *c) return 0; no_space: - ubifs_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d", + ubifs_err(c, "LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); ubifs_dump_lpt_info(c); ubifs_dump_lpt_lebs(c); @@ -1638,7 +1638,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); if (!buf) { - ubifs_err("cannot allocate memory for ltab checking"); + ubifs_err(c, "cannot allocate memory for ltab checking"); return 0; } @@ -1660,18 +1660,18 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) continue; } if (!dbg_is_all_ff(p, len)) { - ubifs_err("invalid empty space in LEB %d at %d", + ubifs_err(c, "invalid empty space in LEB %d at %d", lnum, c->leb_size - len); err = -EINVAL; } i = lnum - c->lpt_first; if (len != c->ltab[i].free) { - ubifs_err("invalid free space in LEB %d (free %d, expected %d)", + ubifs_err(c, "invalid free space in LEB %d (free %d, expected %d)", lnum, len, c->ltab[i].free); err = -EINVAL; } if (dirty != c->ltab[i].dirty) { - ubifs_err("invalid dirty space in LEB %d (dirty %d, expected %d)", + ubifs_err(c, "invalid dirty space in LEB %d (dirty %d, expected %d)", lnum, dirty, c->ltab[i].dirty); err = -EINVAL; } @@ -1725,7 +1725,7 @@ int dbg_check_ltab(struct ubifs_info *c) for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) { err = dbg_check_ltab_lnum(c, lnum); if (err) { - ubifs_err("failed at LEB %d", lnum); + ubifs_err(c, "failed at LEB %d", lnum); return err; } } @@ -1757,7 +1757,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) free += c->leb_size; } if (free < c->lpt_sz) { - ubifs_err("LPT space error: free %lld lpt_sz %lld", + ubifs_err(c, "LPT space error: free %lld lpt_sz %lld", free, c->lpt_sz); ubifs_dump_lpt_info(c); ubifs_dump_lpt_lebs(c); @@ -1797,12 +1797,12 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) d->chk_lpt_lebs = 0; d->chk_lpt_wastage = 0; if (c->dirty_pn_cnt > c->pnode_cnt) { - ubifs_err("dirty pnodes %d exceed max %d", + ubifs_err(c, "dirty pnodes %d exceed max %d", c->dirty_pn_cnt, c->pnode_cnt); err = -EINVAL; } if (c->dirty_nn_cnt > c->nnode_cnt) { - ubifs_err("dirty nnodes %d exceed max %d", + ubifs_err(c, "dirty nnodes %d exceed max %d", c->dirty_nn_cnt, c->nnode_cnt); err = -EINVAL; } @@ -1820,22 +1820,22 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) chk_lpt_sz *= d->chk_lpt_lebs; chk_lpt_sz += len - c->nhead_offs; if (d->chk_lpt_sz != chk_lpt_sz) { - ubifs_err("LPT wrote %lld but space used was %lld", + ubifs_err(c, "LPT wrote %lld but space used was %lld", d->chk_lpt_sz, chk_lpt_sz); err = -EINVAL; } if (d->chk_lpt_sz > c->lpt_sz) { - ubifs_err("LPT wrote %lld but lpt_sz is %lld", + ubifs_err(c, "LPT wrote %lld but lpt_sz is %lld", d->chk_lpt_sz, c->lpt_sz); err = -EINVAL; } if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) { - 
ubifs_err("LPT layout size %lld but wrote %lld", + ubifs_err(c, "LPT layout size %lld but wrote %lld", d->chk_lpt_sz, d->chk_lpt_sz2); err = -EINVAL; } if (d->chk_lpt_sz2 && d->new_nhead_offs != len) { - ubifs_err("LPT new nhead offs: expected %d was %d", + ubifs_err(c, "LPT new nhead offs: expected %d was %d", d->new_nhead_offs, len); err = -EINVAL; } @@ -1845,7 +1845,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) if (c->big_lpt) lpt_sz += c->lsave_sz; if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) { - ubifs_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld", + ubifs_err(c, "LPT chk_lpt_sz %lld + waste %lld exceeds %lld", d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); err = -EINVAL; } @@ -1887,7 +1887,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum); buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); if (!buf) { - ubifs_err("cannot allocate memory to dump LPT"); + ubifs_err(c, "cannot allocate memory to dump LPT"); return; } @@ -1962,7 +1962,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) pr_err("LEB %d:%d, lsave len\n", lnum, offs); break; default: - ubifs_err("LPT node type %d not recognized", node_type); + ubifs_err(c, "LPT node type %d not recognized", node_type); goto out; } diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index 1a4bb9e..c6a5e39 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c @@ -82,7 +82,7 @@ out: return -EUCLEAN; out_dump: - ubifs_err("unexpected node type %d master LEB %d:%d", + ubifs_err(c, "unexpected node type %d master LEB %d:%d", snod->type, lnum, snod->offs); ubifs_scan_destroy(sleb); return -EINVAL; @@ -240,7 +240,7 @@ static int validate_master(const struct ubifs_info *c) return 0; out: - ubifs_err("bad master node at offset %d error %d", c->mst_offs, err); + ubifs_err(c, "bad master node at offset %d error %d", c->mst_offs, err); ubifs_dump_node(c, c->mst_node); return -EINVAL; } @@ -316,7 +316,7 @@ int ubifs_read_master(struct ubifs_info *c) if (c->leb_cnt < old_leb_cnt || c->leb_cnt < UBIFS_MIN_LEB_CNT) { - ubifs_err("bad leb_cnt on master node"); + ubifs_err(c, "bad leb_cnt on master node"); ubifs_dump_node(c, c->mst_node); return -EINVAL; } diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 4409f48..caf2d12 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -88,7 +88,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) else if (inum > o->inum) p = &(*p)->rb_right; else { - ubifs_err("orphaned twice"); + ubifs_err(c, "orphaned twice"); spin_unlock(&c->orphan_lock); kfree(orphan); return 0; @@ -155,7 +155,7 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum) } } spin_unlock(&c->orphan_lock); - ubifs_err("missing orphan ino %lu", (unsigned long)inum); + ubifs_err(c, "missing orphan ino %lu", (unsigned long)inum); dump_stack(); } @@ -287,7 +287,7 @@ static int write_orph_node(struct ubifs_info *c, int atomic) * We limit the number of orphans so that this should * never happen. */ - ubifs_err("out of space in orphan area"); + ubifs_err(c, "out of space in orphan area"); return -EINVAL; } } @@ -397,7 +397,7 @@ static int consolidate(struct ubifs_info *c) * We limit the number of orphans so that this should * never happen. 
*/ - ubifs_err("out of space in orphan area"); + ubifs_err(c, "out of space in orphan area"); err = -EINVAL; } spin_unlock(&c->orphan_lock); @@ -569,7 +569,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, list_for_each_entry(snod, &sleb->nodes, list) { if (snod->type != UBIFS_ORPH_NODE) { - ubifs_err("invalid node type %d in orphan area at %d:%d", + ubifs_err(c, "invalid node type %d in orphan area at %d:%d", snod->type, sleb->lnum, snod->offs); ubifs_dump_node(c, snod->node); return -EINVAL; @@ -596,7 +596,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, * number. That makes this orphan node, out of date. */ if (!first) { - ubifs_err("out of order commit number %llu in orphan node at %d:%d", + ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d", cmt_no, sleb->lnum, snod->offs); ubifs_dump_node(c, snod->node); return -EINVAL; @@ -831,20 +831,20 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr, if (inum != ci->last_ino) { /* Lowest node type is the inode node, so it comes first */ if (key_type(c, &zbr->key) != UBIFS_INO_KEY) - ubifs_err("found orphan node ino %lu, type %d", + ubifs_err(c, "found orphan node ino %lu, type %d", (unsigned long)inum, key_type(c, &zbr->key)); ci->last_ino = inum; ci->tot_inos += 1; err = ubifs_tnc_read_node(c, zbr, ci->node); if (err) { - ubifs_err("node read failed, error %d", err); + ubifs_err(c, "node read failed, error %d", err); return err; } if (ci->node->nlink == 0) /* Must be recorded as an orphan */ if (!dbg_find_check_orphan(&ci->root, inum) && !dbg_find_orphan(c, inum)) { - ubifs_err("missing orphan, ino %lu", + ubifs_err(c, "missing orphan, ino %lu", (unsigned long)inum); ci->missing += 1; } @@ -887,7 +887,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci) buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); if (!buf) { - ubifs_err("cannot allocate memory to check orphans"); + ubifs_err(c, "cannot allocate memory to check orphans"); return 0; } @@ -925,7 +925,7 @@ static int dbg_check_orphans(struct ubifs_info *c) ci.root = RB_ROOT; ci.node = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS); if (!ci.node) { - ubifs_err("out of memory"); + ubifs_err(c, "out of memory"); return -ENOMEM; } @@ -935,12 +935,12 @@ static int dbg_check_orphans(struct ubifs_info *c) err = dbg_walk_index(c, &dbg_orphan_check, NULL, &ci); if (err) { - ubifs_err("cannot scan TNC, error %d", err); + ubifs_err(c, "cannot scan TNC, error %d", err); goto out; } if (ci.missing) { - ubifs_err("%lu missing orphan(s)", ci.missing); + ubifs_err(c, "%lu missing orphan(s)", ci.missing); err = -EINVAL; goto out; } diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index c640938..695fc71 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -305,7 +305,7 @@ int ubifs_recover_master_node(struct ubifs_info *c) mst = mst2; } - ubifs_msg("recovered master node from LEB %d", + ubifs_msg(c, "recovered master node from LEB %d", (mst == mst1 ? 
UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1)); memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ); @@ -360,13 +360,13 @@ int ubifs_recover_master_node(struct ubifs_info *c) out_err: err = -EINVAL; out_free: - ubifs_err("failed to recover master node"); + ubifs_err(c, "failed to recover master node"); if (mst1) { - ubifs_err("dumping first master node"); + ubifs_err(c, "dumping first master node"); ubifs_dump_node(c, mst1); } if (mst2) { - ubifs_err("dumping second master node"); + ubifs_err(c, "dumping second master node"); ubifs_dump_node(c, mst2); } vfree(buf2); @@ -682,7 +682,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, ret, lnum, offs); break; } else { - ubifs_err("unexpected return value %d", ret); + ubifs_err(c, "unexpected return value %d", ret); err = -EINVAL; goto error; } @@ -702,7 +702,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, * See header comment for this file for more * explanations about the reasons we have this check. */ - ubifs_err("corrupt empty space LEB %d:%d, corruption starts at %d", + ubifs_err(c, "corrupt empty space LEB %d:%d, corruption starts at %d", lnum, offs, corruption); /* Make sure we dump interesting non-0xFF data */ offs += corruption; @@ -788,13 +788,13 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, corrupted_rescan: /* Re-scan the corrupted data with verbose messages */ - ubifs_err("corruption %d", ret); + ubifs_err(c, "corruption %d", ret); ubifs_scan_a_node(c, buf, len, lnum, offs, 1); corrupted: ubifs_scanned_corruption(c, lnum, offs, buf); err = -EUCLEAN; error: - ubifs_err("LEB %d scanning failed", lnum); + ubifs_err(c, "LEB %d scanning failed", lnum); ubifs_scan_destroy(sleb); return ERR_PTR(err); } @@ -826,15 +826,15 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs, goto out_free; ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0); if (ret != SCANNED_A_NODE) { - ubifs_err("Not a valid node"); + ubifs_err(c, "Not a valid node"); goto out_err; } if (cs_node->ch.node_type != UBIFS_CS_NODE) { - ubifs_err("Node a CS node, type is %d", cs_node->ch.node_type); + ubifs_err(c, "Node a CS node, type is %d", cs_node->ch.node_type); goto out_err; } if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) { - ubifs_err("CS node cmt_no %llu != current cmt_no %llu", + ubifs_err(c, "CS node cmt_no %llu != current cmt_no %llu", (unsigned long long)le64_to_cpu(cs_node->cmt_no), c->cmt_no); goto out_err; @@ -847,7 +847,7 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs, out_err: err = -EINVAL; out_free: - ubifs_err("failed to get CS sqnum"); + ubifs_err(c, "failed to get CS sqnum"); kfree(cs_node); return err; } @@ -899,7 +899,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, } } if (snod->sqnum > cs_sqnum) { - ubifs_err("unrecoverable log corruption in LEB %d", + ubifs_err(c, "unrecoverable log corruption in LEB %d", lnum); ubifs_scan_destroy(sleb); return ERR_PTR(-EUCLEAN); @@ -975,11 +975,8 @@ int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf) return err; dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs); - err = recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf); - if (err) - return err; - return 0; + return recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf); } /** @@ -1004,10 +1001,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c, if (len == 0) { /* Nothing to read, just unmap it */ - err = ubifs_leb_unmap(c, lnum); - if (err) - return err; - return 0; + return 
ubifs_leb_unmap(c, lnum); } err = ubifs_leb_read(c, lnum, buf, offs, len, 0); @@ -1043,7 +1037,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c, } if (ret == SCANNED_EMPTY_SPACE) { - ubifs_err("unexpected empty space at %d:%d", + ubifs_err(c, "unexpected empty space at %d:%d", lnum, offs); return -EUCLEAN; } @@ -1137,7 +1131,7 @@ static int grab_empty_leb(struct ubifs_info *c) */ lnum = ubifs_find_free_leb_for_idx(c); if (lnum < 0) { - ubifs_err("could not find an empty LEB"); + ubifs_err(c, "could not find an empty LEB"); ubifs_dump_lprops(c); ubifs_dump_budg(c, &c->bi); return lnum; @@ -1217,7 +1211,7 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c) } mutex_unlock(&wbuf->io_mutex); if (err < 0) { - ubifs_err("GC failed, error %d", err); + ubifs_err(c, "GC failed, error %d", err); if (err == -EAGAIN) err = -EINVAL; return err; @@ -1464,7 +1458,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e) return 0; out: - ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d", + ubifs_warn(c, "inode %lu failed to fix size %lld -> %lld error %d", (unsigned long)e->inum, e->i_size, e->d_size, err); return err; } diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 9b40a1c..3ca4540 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -458,13 +458,13 @@ int ubifs_validate_entry(struct ubifs_info *c, nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 || strnlen(dent->name, nlen) != nlen || le64_to_cpu(dent->inum) > MAX_INUM) { - ubifs_err("bad %s node", key_type == UBIFS_DENT_KEY ? + ubifs_err(c, "bad %s node", key_type == UBIFS_DENT_KEY ? "directory entry" : "extended attribute entry"); return -EINVAL; } if (key_type != UBIFS_DENT_KEY && key_type != UBIFS_XENT_KEY) { - ubifs_err("bad key type %d", key_type); + ubifs_err(c, "bad key type %d", key_type); return -EINVAL; } @@ -589,7 +589,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) cond_resched(); if (snod->sqnum >= SQNUM_WATERMARK) { - ubifs_err("file system's life ended"); + ubifs_err(c, "file system's life ended"); goto out_dump; } @@ -647,7 +647,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) if (old_size < 0 || old_size > c->max_inode_sz || new_size < 0 || new_size > c->max_inode_sz || old_size <= new_size) { - ubifs_err("bad truncation node"); + ubifs_err(c, "bad truncation node"); goto out_dump; } @@ -662,7 +662,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) break; } default: - ubifs_err("unexpected node type %d in bud LEB %d:%d", + ubifs_err(c, "unexpected node type %d in bud LEB %d:%d", snod->type, lnum, snod->offs); err = -EINVAL; goto out_dump; @@ -685,7 +685,7 @@ out: return err; out_dump: - ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs); + ubifs_err(c, "bad node is at LEB %d:%d", lnum, snod->offs); ubifs_dump_node(c, snod->node); ubifs_scan_destroy(sleb); return -EINVAL; @@ -805,7 +805,7 @@ static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref) if (bud) { if (bud->jhead == jhead && bud->start <= offs) return 1; - ubifs_err("bud at LEB %d:%d was already referred", lnum, offs); + ubifs_err(c, "bud at LEB %d:%d was already referred", lnum, offs); return -EINVAL; } @@ -861,12 +861,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) * numbers. 
*/ if (snod->type != UBIFS_CS_NODE) { - ubifs_err("first log node at LEB %d:%d is not CS node", + ubifs_err(c, "first log node at LEB %d:%d is not CS node", lnum, offs); goto out_dump; } if (le64_to_cpu(node->cmt_no) != c->cmt_no) { - ubifs_err("first CS node at LEB %d:%d has wrong commit number %llu expected %llu", + ubifs_err(c, "first CS node at LEB %d:%d has wrong commit number %llu expected %llu", lnum, offs, (unsigned long long)le64_to_cpu(node->cmt_no), c->cmt_no); @@ -891,7 +891,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) /* Make sure the first node sits at offset zero of the LEB */ if (snod->offs != 0) { - ubifs_err("first node is not at zero offset"); + ubifs_err(c, "first node is not at zero offset"); goto out_dump; } @@ -899,12 +899,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) cond_resched(); if (snod->sqnum >= SQNUM_WATERMARK) { - ubifs_err("file system's life ended"); + ubifs_err(c, "file system's life ended"); goto out_dump; } if (snod->sqnum < c->cs_sqnum) { - ubifs_err("bad sqnum %llu, commit sqnum %llu", + ubifs_err(c, "bad sqnum %llu, commit sqnum %llu", snod->sqnum, c->cs_sqnum); goto out_dump; } @@ -934,12 +934,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) case UBIFS_CS_NODE: /* Make sure it sits at the beginning of LEB */ if (snod->offs != 0) { - ubifs_err("unexpected node in log"); + ubifs_err(c, "unexpected node in log"); goto out_dump; } break; default: - ubifs_err("unexpected node in log"); + ubifs_err(c, "unexpected node in log"); goto out_dump; } } @@ -955,7 +955,7 @@ out: return err; out_dump: - ubifs_err("log error detected while replaying the log at LEB %d:%d", + ubifs_err(c, "log error detected while replaying the log at LEB %d:%d", lnum, offs + snod->offs); ubifs_dump_node(c, snod->node); ubifs_scan_destroy(sleb); @@ -1017,7 +1017,7 @@ int ubifs_replay_journal(struct ubifs_info *c) return free; /* Error code */ if (c->ihead_offs != c->leb_size - free) { - ubifs_err("bad index head LEB %d:%d", c->ihead_lnum, + ubifs_err(c, "bad index head LEB %d:%d", c->ihead_lnum, c->ihead_offs); return -EINVAL; } @@ -1040,7 +1040,7 @@ int ubifs_replay_journal(struct ubifs_info *c) * someting went wrong and we cannot proceed mounting * the file-system. */ - ubifs_err("no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted", + ubifs_err(c, "no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted", lnum, 0); err = -EINVAL; } diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 79c6dbb..f4fbc7b 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -335,7 +335,7 @@ static int create_default_filesystem(struct ubifs_info *c) if (err) return err; - ubifs_msg("default file-system created"); + ubifs_msg(c, "default file-system created"); return 0; } @@ -365,13 +365,13 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) } if (le32_to_cpu(sup->min_io_size) != c->min_io_size) { - ubifs_err("min. I/O unit mismatch: %d in superblock, %d real", + ubifs_err(c, "min. 
I/O unit mismatch: %d in superblock, %d real", le32_to_cpu(sup->min_io_size), c->min_io_size); goto failed; } if (le32_to_cpu(sup->leb_size) != c->leb_size) { - ubifs_err("LEB size mismatch: %d in superblock, %d real", + ubifs_err(c, "LEB size mismatch: %d in superblock, %d real", le32_to_cpu(sup->leb_size), c->leb_size); goto failed; } @@ -393,33 +393,33 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6; if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) { - ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, %d minimum required", + ubifs_err(c, "bad LEB count: %d in superblock, %d on UBI volume, %d minimum required", c->leb_cnt, c->vi.size, min_leb_cnt); goto failed; } if (c->max_leb_cnt < c->leb_cnt) { - ubifs_err("max. LEB count %d less than LEB count %d", + ubifs_err(c, "max. LEB count %d less than LEB count %d", c->max_leb_cnt, c->leb_cnt); goto failed; } if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) { - ubifs_err("too few main LEBs count %d, must be at least %d", + ubifs_err(c, "too few main LEBs count %d, must be at least %d", c->main_lebs, UBIFS_MIN_MAIN_LEBS); goto failed; } max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS; if (c->max_bud_bytes < max_bytes) { - ubifs_err("too small journal (%lld bytes), must be at least %lld bytes", + ubifs_err(c, "too small journal (%lld bytes), must be at least %lld bytes", c->max_bud_bytes, max_bytes); goto failed; } max_bytes = (long long)c->leb_size * c->main_lebs; if (c->max_bud_bytes > max_bytes) { - ubifs_err("too large journal size (%lld bytes), only %lld bytes available in the main area", + ubifs_err(c, "too large journal size (%lld bytes), only %lld bytes available in the main area", c->max_bud_bytes, max_bytes); goto failed; } @@ -468,7 +468,7 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) return 0; failed: - ubifs_err("bad superblock, error %d", err); + ubifs_err(c, "bad superblock, error %d", err); ubifs_dump_node(c, sup); return -EINVAL; } @@ -549,12 +549,12 @@ int ubifs_read_superblock(struct ubifs_info *c) ubifs_assert(!c->ro_media || c->ro_mount); if (!c->ro_mount || c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) { - ubifs_err("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", + ubifs_err(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", c->fmt_version, c->ro_compat_version, UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION); if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) { - ubifs_msg("only R/O mounting is possible"); + ubifs_msg(c, "only R/O mounting is possible"); err = -EROFS; } else err = -EINVAL; @@ -570,7 +570,7 @@ int ubifs_read_superblock(struct ubifs_info *c) } if (c->fmt_version < 3) { - ubifs_err("on-flash format version %d is not supported", + ubifs_err(c, "on-flash format version %d is not supported", c->fmt_version); err = -EINVAL; goto out; @@ -595,7 +595,7 @@ int ubifs_read_superblock(struct ubifs_info *c) c->key_len = UBIFS_SK_LEN; break; default: - ubifs_err("unsupported key format"); + ubifs_err(c, "unsupported key format"); err = -EINVAL; goto out; } @@ -785,7 +785,7 @@ int ubifs_fixup_free_space(struct ubifs_info *c) ubifs_assert(c->space_fixup); ubifs_assert(!c->ro_mount); - ubifs_msg("start fixing up free space"); + ubifs_msg(c, "start fixing up free space"); err = fixup_free_space(c); if (err) @@ -804,6 +804,6 @@ int ubifs_fixup_free_space(struct ubifs_info *c) if (err) return err; - 
ubifs_msg("free space fixup complete"); + ubifs_msg(c, "free space fixup complete"); return err; } diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index 89adbc4..aab8734 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -100,7 +100,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, if (pad_len < 0 || offs + node_len + pad_len > c->leb_size) { if (!quiet) { - ubifs_err("bad pad node at LEB %d:%d", + ubifs_err(c, "bad pad node at LEB %d:%d", lnum, offs); ubifs_dump_node(c, pad); } @@ -110,7 +110,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, /* Make the node pads to 8-byte boundary */ if ((node_len + pad_len) & 7) { if (!quiet) - ubifs_err("bad padding length %d - %d", + ubifs_err(c, "bad padding length %d - %d", offs, offs + node_len + pad_len); return SCANNED_A_BAD_PAD_NODE; } @@ -152,7 +152,7 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0); if (err && err != -EBADMSG) { - ubifs_err("cannot read %d bytes from LEB %d:%d, error %d", + ubifs_err(c, "cannot read %d bytes from LEB %d:%d, error %d", c->leb_size - offs, lnum, offs, err); kfree(sleb); return ERR_PTR(err); @@ -240,11 +240,11 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, { int len; - ubifs_err("corruption at LEB %d:%d", lnum, offs); + ubifs_err(c, "corruption at LEB %d:%d", lnum, offs); len = c->leb_size - offs; if (len > 8192) len = 8192; - ubifs_err("first %d bytes from LEB %d:%d", len, lnum, offs); + ubifs_err(c, "first %d bytes from LEB %d:%d", len, lnum, offs); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1); } @@ -299,16 +299,16 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, switch (ret) { case SCANNED_GARBAGE: - ubifs_err("garbage"); + ubifs_err(c, "garbage"); goto corrupted; case SCANNED_A_NODE: break; case SCANNED_A_CORRUPT_NODE: case SCANNED_A_BAD_PAD_NODE: - ubifs_err("bad node"); + ubifs_err(c, "bad node"); goto corrupted; default: - ubifs_err("unknown"); + ubifs_err(c, "unknown"); err = -EINVAL; goto error; } @@ -325,7 +325,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, if (offs % c->min_io_size) { if (!quiet) - ubifs_err("empty space starts at non-aligned offset %d", + ubifs_err(c, "empty space starts at non-aligned offset %d", offs); goto corrupted; } @@ -338,7 +338,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, for (; len; offs++, buf++, len--) if (*(uint8_t *)buf != 0xff) { if (!quiet) - ubifs_err("corrupt empty space at LEB %d:%d", + ubifs_err(c, "corrupt empty space at LEB %d:%d", lnum, offs); goto corrupted; } @@ -348,14 +348,14 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, corrupted: if (!quiet) { ubifs_scanned_corruption(c, lnum, offs, buf); - ubifs_err("LEB %d scanning failed", lnum); + ubifs_err(c, "LEB %d scanning failed", lnum); } err = -EUCLEAN; ubifs_scan_destroy(sleb); return ERR_PTR(err); error: - ubifs_err("LEB %d scanning failed, error %d", lnum, err); + ubifs_err(c, "LEB %d scanning failed, error %d", lnum, err); ubifs_scan_destroy(sleb); return ERR_PTR(err); } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 93e9465..75e6f04 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -70,13 +70,13 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode) const struct ubifs_inode *ui = ubifs_inode(inode); if (inode->i_size > 
c->max_inode_sz) { - ubifs_err("inode is too large (%lld)", + ubifs_err(c, "inode is too large (%lld)", (long long)inode->i_size); return 1; } if (ui->compr_type >= UBIFS_COMPR_TYPES_CNT) { - ubifs_err("unknown compression type %d", ui->compr_type); + ubifs_err(c, "unknown compression type %d", ui->compr_type); return 2; } @@ -90,7 +90,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode) return 5; if (!ubifs_compr_present(ui->compr_type)) { - ubifs_warn("inode %lu uses '%s' compression, but it was not compiled in", + ubifs_warn(c, "inode %lu uses '%s' compression, but it was not compiled in", inode->i_ino, ubifs_compr_name(ui->compr_type)); } @@ -242,14 +242,14 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum) return inode; out_invalid: - ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err); + ubifs_err(c, "inode %lu validation failed, error %d", inode->i_ino, err); ubifs_dump_node(c, ino); ubifs_dump_inode(c, inode); err = -EINVAL; out_ino: kfree(ino); out: - ubifs_err("failed to read inode %lu, error %d", inode->i_ino, err); + ubifs_err(c, "failed to read inode %lu, error %d", inode->i_ino, err); iget_failed(inode); return ERR_PTR(err); } @@ -319,7 +319,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc) if (inode->i_nlink) { err = ubifs_jnl_write_inode(c, inode); if (err) - ubifs_err("can't write inode %lu, error %d", + ubifs_err(c, "can't write inode %lu, error %d", inode->i_ino, err); else err = dbg_check_inode_size(c, inode, ui->ui_size); @@ -363,7 +363,7 @@ static void ubifs_evict_inode(struct inode *inode) * Worst case we have a lost orphan inode wasting space, so a * simple error message is OK here. */ - ubifs_err("can't delete inode %lu, error %d", + ubifs_err(c, "can't delete inode %lu, error %d", inode->i_ino, err); out: @@ -492,17 +492,17 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) static int init_constants_early(struct ubifs_info *c) { if (c->vi.corrupted) { - ubifs_warn("UBI volume is corrupted - read-only mode"); + ubifs_warn(c, "UBI volume is corrupted - read-only mode"); c->ro_media = 1; } if (c->di.ro_mode) { - ubifs_msg("read-only UBI device"); + ubifs_msg(c, "read-only UBI device"); c->ro_media = 1; } if (c->vi.vol_type == UBI_STATIC_VOLUME) { - ubifs_msg("static UBI volume - read-only mode"); + ubifs_msg(c, "static UBI volume - read-only mode"); c->ro_media = 1; } @@ -516,19 +516,19 @@ static int init_constants_early(struct ubifs_info *c) c->max_write_shift = fls(c->max_write_size) - 1; if (c->leb_size < UBIFS_MIN_LEB_SZ) { - ubifs_err("too small LEBs (%d bytes), min. is %d bytes", + ubifs_err(c, "too small LEBs (%d bytes), min. is %d bytes", c->leb_size, UBIFS_MIN_LEB_SZ); return -EINVAL; } if (c->leb_cnt < UBIFS_MIN_LEB_CNT) { - ubifs_err("too few LEBs (%d), min. is %d", + ubifs_err(c, "too few LEBs (%d), min. is %d", c->leb_cnt, UBIFS_MIN_LEB_CNT); return -EINVAL; } if (!is_power_of_2(c->min_io_size)) { - ubifs_err("bad min. I/O size %d", c->min_io_size); + ubifs_err(c, "bad min. I/O size %d", c->min_io_size); return -EINVAL; } @@ -539,7 +539,7 @@ static int init_constants_early(struct ubifs_info *c) if (c->max_write_size < c->min_io_size || c->max_write_size % c->min_io_size || !is_power_of_2(c->max_write_size)) { - ubifs_err("bad write buffer size %d for %d min. I/O unit", + ubifs_err(c, "bad write buffer size %d for %d min. 
I/O unit", c->max_write_size, c->min_io_size); return -EINVAL; } @@ -665,7 +665,7 @@ static int init_constants_sb(struct ubifs_info *c) tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt; tmp = ALIGN(tmp, c->min_io_size); if (tmp > c->leb_size) { - ubifs_err("too small LEB size %d, at least %d needed", + ubifs_err(c, "too small LEB size %d, at least %d needed", c->leb_size, tmp); return -EINVAL; } @@ -680,7 +680,7 @@ static int init_constants_sb(struct ubifs_info *c) tmp /= c->leb_size; tmp += 1; if (c->log_lebs < tmp) { - ubifs_err("too small log %d LEBs, required min. %d LEBs", + ubifs_err(c, "too small log %d LEBs, required min. %d LEBs", c->log_lebs, tmp); return -EINVAL; } @@ -772,7 +772,7 @@ static int take_gc_lnum(struct ubifs_info *c) int err; if (c->gc_lnum == -1) { - ubifs_err("no LEB for GC"); + ubifs_err(c, "no LEB for GC"); return -EINVAL; } @@ -857,7 +857,7 @@ static void free_orphans(struct ubifs_info *c) orph = list_entry(c->orph_list.next, struct ubifs_orphan, list); list_del(&orph->list); kfree(orph); - ubifs_err("orphan list not empty at unmount"); + ubifs_err(c, "orphan list not empty at unmount"); } vfree(c->orph_buf); @@ -954,7 +954,8 @@ static const match_table_t tokens = { */ static int parse_standard_option(const char *option) { - ubifs_msg("parse %s", option); + + pr_notice("UBIFS: parse %s\n", option); if (!strcmp(option, "sync")) return MS_SYNCHRONOUS; return 0; @@ -1026,7 +1027,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, else if (!strcmp(name, "zlib")) c->mount_opts.compr_type = UBIFS_COMPR_ZLIB; else { - ubifs_err("unknown compressor \"%s\"", name); + ubifs_err(c, "unknown compressor \"%s\"", name); //FIXME: is c ready? kfree(name); return -EINVAL; } @@ -1042,7 +1043,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, flag = parse_standard_option(p); if (!flag) { - ubifs_err("unrecognized mount option \"%s\" or missing value", + ubifs_err(c, "unrecognized mount option \"%s\" or missing value", p); return -EINVAL; } @@ -1105,7 +1106,7 @@ again: } /* Just disable bulk-read */ - ubifs_warn("cannot allocate %d bytes of memory for bulk-read, disabling it", + ubifs_warn(c, "cannot allocate %d bytes of memory for bulk-read, disabling it", c->max_bu_buf_len); c->mount_opts.bulk_read = 1; c->bulk_read = 0; @@ -1124,7 +1125,7 @@ static int check_free_space(struct ubifs_info *c) { ubifs_assert(c->dark_wm > 0); if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { - ubifs_err("insufficient free space to mount in R/W mode"); + ubifs_err(c, "insufficient free space to mount in R/W mode"); ubifs_dump_budg(c, &c->bi); ubifs_dump_lprops(c); return -ENOSPC; @@ -1166,14 +1167,14 @@ static int mount_ubifs(struct ubifs_info *c) * This UBI volume is empty, and read-only, or the file system * is mounted read-only - we cannot format it. */ - ubifs_err("can't format empty UBI volume: read-only %s", + ubifs_err(c, "can't format empty UBI volume: read-only %s", c->ro_media ? "UBI volume" : "mount"); err = -EROFS; goto out_free; } if (c->ro_media && !c->ro_mount) { - ubifs_err("cannot mount read-write - read-only media"); + ubifs_err(c, "cannot mount read-write - read-only media"); err = -EROFS; goto out_free; } @@ -1221,7 +1222,7 @@ static int mount_ubifs(struct ubifs_info *c) * or overridden by mount options is actually compiled in. 
*/ if (!ubifs_compr_present(c->default_compr)) { - ubifs_err("'compressor \"%s\" is not compiled in", + ubifs_err(c, "'compressor \"%s\" is not compiled in", ubifs_compr_name(c->default_compr)); err = -ENOTSUPP; goto out_free; @@ -1250,7 +1251,7 @@ static int mount_ubifs(struct ubifs_info *c) if (IS_ERR(c->bgt)) { err = PTR_ERR(c->bgt); c->bgt = NULL; - ubifs_err("cannot spawn \"%s\", error %d", + ubifs_err(c, "cannot spawn \"%s\", error %d", c->bgt_name, err); goto out_wbufs; } @@ -1264,7 +1265,7 @@ static int mount_ubifs(struct ubifs_info *c) init_constants_master(c); if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { - ubifs_msg("recovery needed"); + ubifs_msg(c, "recovery needed"); c->need_recovery = 1; } @@ -1284,7 +1285,7 @@ static int mount_ubifs(struct ubifs_info *c) goto out_lpt; } - if (!c->ro_mount) { + if (!c->ro_mount && !c->need_recovery) { /* * Set the "dirty" flag so that if we reboot uncleanly we * will notice this immediately on the next mount. @@ -1373,10 +1374,10 @@ static int mount_ubifs(struct ubifs_info *c) if (c->need_recovery) { if (c->ro_mount) - ubifs_msg("recovery deferred"); + ubifs_msg(c, "recovery deferred"); else { c->need_recovery = 0; - ubifs_msg("recovery completed"); + ubifs_msg(c, "recovery completed"); /* * GC LEB has to be empty and taken at this point. But * the journal head LEBs may also be accounted as @@ -1397,20 +1398,20 @@ static int mount_ubifs(struct ubifs_info *c) c->mounting = 0; - ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s", + ubifs_msg(c, "UBIFS: mounted UBI device %d, volume %d, name \"%s\"%s", c->vi.ubi_num, c->vi.vol_id, c->vi.name, c->ro_mount ? ", R/O mode" : ""); x = (long long)c->main_lebs * c->leb_size; y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes; - ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes", + ubifs_msg(c, "LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes", c->leb_size, c->leb_size >> 10, c->min_io_size, c->max_write_size); - ubifs_msg("FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)", + ubifs_msg(c, "FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)", x, x >> 20, c->main_lebs, y, y >> 20, c->log_lebs + c->max_bud_cnt); - ubifs_msg("reserved for root: %llu bytes (%llu KiB)", + ubifs_msg(c, "reserved for root: %llu bytes (%llu KiB)", c->report_rp_size, c->report_rp_size >> 10); - ubifs_msg("media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s", + ubifs_msg(c, "media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s", c->fmt_version, c->ro_compat_version, UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid, c->big_lpt ? 
", big LPT model" : ", small LPT model"); @@ -1543,8 +1544,8 @@ static int ubifs_remount_rw(struct ubifs_info *c) int err, lnum; if (c->rw_incompat) { - ubifs_err("the file-system is not R/W-compatible"); - ubifs_msg("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", + ubifs_err(c, "the file-system is not R/W-compatible"); + ubifs_msg(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", c->fmt_version, c->ro_compat_version, UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION); return -EROFS; @@ -1581,7 +1582,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) } if (c->need_recovery) { - ubifs_msg("completing deferred recovery"); + ubifs_msg(c, "completing deferred recovery"); err = ubifs_write_rcvrd_mst_node(c); if (err) goto out; @@ -1630,7 +1631,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) if (IS_ERR(c->bgt)) { err = PTR_ERR(c->bgt); c->bgt = NULL; - ubifs_err("cannot spawn \"%s\", error %d", + ubifs_err(c, "cannot spawn \"%s\", error %d", c->bgt_name, err); goto out; } @@ -1664,7 +1665,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) if (c->need_recovery) { c->need_recovery = 0; - ubifs_msg("deferred recovery completed"); + ubifs_msg(c, "deferred recovery completed"); } else { /* * Do not run the debugging space check if the were doing @@ -1752,8 +1753,7 @@ static void ubifs_put_super(struct super_block *sb) int i; struct ubifs_info *c = sb->s_fs_info; - ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num, - c->vi.vol_id); + ubifs_msg(c, "un-mount UBI device %d", c->vi.ubi_num); /* * The following asserts are only valid if there has not been a failure @@ -1809,7 +1809,7 @@ static void ubifs_put_super(struct super_block *sb) * next mount, so we just print a message and * continue to unmount normally. */ - ubifs_err("failed to write master node, error %d", + ubifs_err(c, "failed to write master node, error %d", err); } else { for (i = 0; i < c->jhead_cnt; i++) @@ -1834,17 +1834,17 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) err = ubifs_parse_options(c, data, 1); if (err) { - ubifs_err("invalid or unknown remount parameter"); + ubifs_err(c, "invalid or unknown remount parameter"); return err; } if (c->ro_mount && !(*flags & MS_RDONLY)) { if (c->ro_error) { - ubifs_msg("cannot re-mount R/W due to prior errors"); + ubifs_msg(c, "cannot re-mount R/W due to prior errors"); return -EROFS; } if (c->ro_media) { - ubifs_msg("cannot re-mount R/W - UBI volume is R/O"); + ubifs_msg(c, "cannot re-mount R/W - UBI volume is R/O"); return -EROFS; } err = ubifs_remount_rw(c); @@ -1852,7 +1852,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) return err; } else if (!c->ro_mount && (*flags & MS_RDONLY)) { if (c->ro_error) { - ubifs_msg("cannot re-mount R/O due to prior errors"); + ubifs_msg(c, "cannot re-mount R/O due to prior errors"); return -EROFS; } ubifs_remount_ro(c); @@ -2104,8 +2104,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, */ ubi = open_ubi(name, UBI_READONLY); if (IS_ERR(ubi)) { - ubifs_err("cannot open \"%s\", error %d", - name, (int)PTR_ERR(ubi)); + pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d", + current->pid, name, (int)PTR_ERR(ubi)); return ERR_CAST(ubi); } @@ -2233,8 +2233,8 @@ static int __init ubifs_init(void) * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. 
*/ if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) { - ubifs_err("VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes", - (unsigned int)PAGE_CACHE_SIZE); + pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes", + current->pid, (unsigned int)PAGE_CACHE_SIZE); return -EINVAL; } @@ -2257,7 +2257,8 @@ static int __init ubifs_init(void) err = register_filesystem(&ubifs_fs_type); if (err) { - ubifs_err("cannot register file system, error %d", err); + pr_err("UBIFS error (pid %d): cannot register file system, error %d", + current->pid, err); goto out_dbg; } return 0; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 6793db0..957f575 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -98,7 +98,7 @@ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs) else if (offs > o->offs) p = &(*p)->rb_right; else { - ubifs_err("old idx added twice!"); + ubifs_err(c, "old idx added twice!"); kfree(old_idx); return 0; } @@ -447,7 +447,7 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type, err = ubifs_leb_read(c, lnum, buf, offs, len, 1); if (err) { - ubifs_err("cannot read node type %d from LEB %d:%d, error %d", + ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d", type, lnum, offs, err); return err; } @@ -1684,27 +1684,27 @@ static int validate_data_node(struct ubifs_info *c, void *buf, int err, len; if (ch->node_type != UBIFS_DATA_NODE) { - ubifs_err("bad node type (%d but expected %d)", + ubifs_err(c, "bad node type (%d but expected %d)", ch->node_type, UBIFS_DATA_NODE); goto out_err; } err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0); if (err) { - ubifs_err("expected node type %d", UBIFS_DATA_NODE); + ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE); goto out; } len = le32_to_cpu(ch->len); if (len != zbr->len) { - ubifs_err("bad node length %d, expected %d", len, zbr->len); + ubifs_err(c, "bad node length %d, expected %d", len, zbr->len); goto out_err; } /* Make sure the key of the read node is correct */ key_read(c, buf + UBIFS_KEY_OFFSET, &key1); if (!keys_eq(c, &zbr->key, &key1)) { - ubifs_err("bad key in node at LEB %d:%d", + ubifs_err(c, "bad key in node at LEB %d:%d", zbr->lnum, zbr->offs); dbg_tnck(&zbr->key, "looked for key "); dbg_tnck(&key1, "found node's key "); @@ -1716,7 +1716,7 @@ static int validate_data_node(struct ubifs_info *c, void *buf, out_err: err = -EINVAL; out: - ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs); + ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs); ubifs_dump_node(c, buf); dump_stack(); return err; @@ -1741,7 +1741,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu) len = bu->zbranch[bu->cnt - 1].offs; len += bu->zbranch[bu->cnt - 1].len - offs; if (len > bu->buf_len) { - ubifs_err("buffer too small %d vs %d", bu->buf_len, len); + ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len); return -EINVAL; } @@ -1757,7 +1757,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu) return -EAGAIN; if (err && err != -EBADMSG) { - ubifs_err("failed to read from LEB %d:%d, error %d", + ubifs_err(c, "failed to read from LEB %d:%d, error %d", lnum, offs, err); dump_stack(); dbg_tnck(&bu->key, "key "); @@ -3313,7 +3313,7 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, out_dump: block = key_block(c, key); - ubifs_err("inode %lu has size %lld, but there are data at offset %lld", + ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld", (unsigned 
long)inode->i_ino, size, ((loff_t)block) << UBIFS_BLOCK_SHIFT); mutex_unlock(&c->tnc_mutex); diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 7a205e0..b45345d 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -53,7 +53,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, br->offs = cpu_to_le32(zbr->offs); br->len = cpu_to_le32(zbr->len); if (!zbr->lnum || !zbr->len) { - ubifs_err("bad ref in znode"); + ubifs_err(c, "bad ref in znode"); ubifs_dump_znode(c, znode); if (zbr->znode) ubifs_dump_znode(c, zbr->znode); @@ -384,7 +384,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) * Do not print scary warnings if the debugging * option which forces in-the-gaps is enabled. */ - ubifs_warn("out of space"); + ubifs_warn(c, "out of space"); ubifs_dump_budg(c, &c->bi); ubifs_dump_lprops(c); } @@ -441,7 +441,7 @@ static int layout_in_empty_space(struct ubifs_info *c) /* Determine the index node position */ if (lnum == -1) { if (c->ileb_nxt >= c->ileb_cnt) { - ubifs_err("out of space"); + ubifs_err(c, "out of space"); return -ENOSPC; } lnum = c->ilebs[c->ileb_nxt++]; @@ -855,7 +855,7 @@ static int write_index(struct ubifs_info *c) br->offs = cpu_to_le32(zbr->offs); br->len = cpu_to_le32(zbr->len); if (!zbr->lnum || !zbr->len) { - ubifs_err("bad ref in znode"); + ubifs_err(c, "bad ref in znode"); ubifs_dump_znode(c, znode); if (zbr->znode) ubifs_dump_znode(c, zbr->znode); @@ -875,7 +875,7 @@ static int write_index(struct ubifs_info *c) if (lnum != znode->lnum || offs != znode->offs || len != znode->len) { - ubifs_err("inconsistent znode posn"); + ubifs_err(c, "inconsistent znode posn"); return -EINVAL; } @@ -973,7 +973,7 @@ static int write_index(struct ubifs_info *c) if (lnum != c->dbg->new_ihead_lnum || buf_offs != c->dbg->new_ihead_offs) { - ubifs_err("inconsistent ihead"); + ubifs_err(c, "inconsistent ihead"); return -EINVAL; } diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c index f6bf899..93f5b78 100644 --- a/fs/ubifs/tnc_misc.c +++ b/fs/ubifs/tnc_misc.c @@ -293,9 +293,9 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, lnum, offs, znode->level, znode->child_cnt); if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) { - ubifs_err("current fanout %d, branch count %d", + ubifs_err(c, "current fanout %d, branch count %d", c->fanout, znode->child_cnt); - ubifs_err("max levels %d, znode level %d", + ubifs_err(c, "max levels %d, znode level %d", UBIFS_MAX_LEVELS, znode->level); err = 1; goto out_dump; @@ -316,7 +316,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, if (zbr->lnum < c->main_first || zbr->lnum >= c->leb_cnt || zbr->offs < 0 || zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) { - ubifs_err("bad branch %d", i); + ubifs_err(c, "bad branch %d", i); err = 2; goto out_dump; } @@ -328,7 +328,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, case UBIFS_XENT_KEY: break; default: - ubifs_err("bad key type at slot %d: %d", + ubifs_err(c, "bad key type at slot %d: %d", i, key_type(c, &zbr->key)); err = 3; goto out_dump; @@ -340,17 +340,17 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, type = key_type(c, &zbr->key); if (c->ranges[type].max_len == 0) { if (zbr->len != c->ranges[type].len) { - ubifs_err("bad target node (type %d) length (%d)", + ubifs_err(c, "bad target node (type %d) length (%d)", type, zbr->len); - ubifs_err("have to be %d", c->ranges[type].len); + ubifs_err(c, "have to be %d", 
c->ranges[type].len); err = 4; goto out_dump; } } else if (zbr->len < c->ranges[type].min_len || zbr->len > c->ranges[type].max_len) { - ubifs_err("bad target node (type %d) length (%d)", + ubifs_err(c, "bad target node (type %d) length (%d)", type, zbr->len); - ubifs_err("have to be in range of %d-%d", + ubifs_err(c, "have to be in range of %d-%d", c->ranges[type].min_len, c->ranges[type].max_len); err = 5; @@ -370,12 +370,12 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, cmp = keys_cmp(c, key1, key2); if (cmp > 0) { - ubifs_err("bad key order (keys %d and %d)", i, i + 1); + ubifs_err(c, "bad key order (keys %d and %d)", i, i + 1); err = 6; goto out_dump; } else if (cmp == 0 && !is_hash_key(c, key1)) { /* These can only be keys with colliding hash */ - ubifs_err("keys %d and %d are not hashed but equivalent", + ubifs_err(c, "keys %d and %d are not hashed but equivalent", i, i + 1); err = 7; goto out_dump; @@ -386,7 +386,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, return 0; out_dump: - ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err); + ubifs_err(c, "bad indexing node at LEB %d:%d, error %d", lnum, offs, err); ubifs_dump_node(c, idx); kfree(idx); return -EINVAL; @@ -482,7 +482,7 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, /* Make sure the key of the read node is correct */ key_read(c, node + UBIFS_KEY_OFFSET, &key1); if (!keys_eq(c, key, &key1)) { - ubifs_err("bad key in node at LEB %d:%d", + ubifs_err(c, "bad key in node at LEB %d:%d", zbr->lnum, zbr->offs); dbg_tnck(key, "looked for key "); dbg_tnck(&key1, "but found node's key "); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index bc04b9c..de75902 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -43,15 +43,19 @@ #define UBIFS_VERSION 1 /* Normal UBIFS messages */ -#define ubifs_msg(fmt, ...) pr_notice("UBIFS: " fmt "\n", ##__VA_ARGS__) +#define ubifs_msg(c, fmt, ...) \ + pr_notice("UBIFS (ubi%d:%d): " fmt "\n", \ + (c)->vi.ubi_num, (c)->vi.vol_id, ##__VA_ARGS__) /* UBIFS error messages */ -#define ubifs_err(fmt, ...) \ - pr_err("UBIFS error (pid %d): %s: " fmt "\n", current->pid, \ +#define ubifs_err(c, fmt, ...) \ + pr_err("UBIFS error (ubi%d:%d pid %d): %s: " fmt "\n", \ + (c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \ __func__, ##__VA_ARGS__) /* UBIFS warning messages */ -#define ubifs_warn(fmt, ...) \ - pr_warn("UBIFS warning (pid %d): %s: " fmt "\n", \ - current->pid, __func__, ##__VA_ARGS__) +#define ubifs_warn(c, fmt, ...) \ + pr_warn("UBIFS warning (ubi%d:%d pid %d): %s: " fmt "\n", \ + (c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \ + __func__, ##__VA_ARGS__) /* * A variant of 'ubifs_err()' which takes the UBIFS file-sytem description * object as an argument. @@ -59,7 +63,7 @@ #define ubifs_errc(c, fmt, ...) \ do { \ if (!(c)->probing) \ - ubifs_err(fmt, ##__VA_ARGS__); \ + ubifs_err(c, fmt, ##__VA_ARGS__); \ } while (0) /* UBIFS file system VFS magic number */ @@ -158,7 +162,7 @@ #define WORST_COMPR_FACTOR 2 /* - * How much memory is needed for a buffer where we comress a data node. + * How much memory is needed for a buffer where we compress a data node. 
*/ #define COMPRESSED_DATA_NODE_BUF_SZ \ (UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR) @@ -664,7 +668,7 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c, * @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes * fields * @softlimit: soft write-buffer timeout interval - * @delta: hard and soft timeouts delta (the timer expire inteval is @softlimit + * @delta: hard and soft timeouts delta (the timer expire interval is @softlimit * and @softlimit + @delta) * @timer: write-buffer timer * @no_timer: non-zero if this write-buffer does not have a timer @@ -930,9 +934,9 @@ struct ubifs_orphan { /** * struct ubifs_mount_opts - UBIFS-specific mount options information. * @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast) - * @bulk_read: enable/disable bulk-reads (%0 default, %1 disabe, %2 enable) + * @bulk_read: enable/disable bulk-reads (%0 default, %1 disable, %2 enable) * @chk_data_crc: enable/disable CRC data checking when reading data nodes - * (%0 default, %1 disabe, %2 enable) + * (%0 default, %1 disable, %2 enable) * @override_compr: override default compressor (%0 - do not override and use * superblock compressor, %1 - override and use compressor * specified in @compr_type) @@ -962,9 +966,9 @@ struct ubifs_mount_opts { * optimization) * @nospace_rp: the same as @nospace, but additionally means that even reserved * pool is full - * @page_budget: budget for a page (constant, nenver changed after mount) - * @inode_budget: budget for an inode (constant, nenver changed after mount) - * @dent_budget: budget for a directory entry (constant, nenver changed after + * @page_budget: budget for a page (constant, never changed after mount) + * @inode_budget: budget for an inode (constant, never changed after mount) + * @dent_budget: budget for a directory entry (constant, never changed after * mount) */ struct ubifs_budg_info { @@ -1787,10 +1791,10 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* compressor.c */ int __init ubifs_compressors_init(void); void ubifs_compressors_exit(void); -void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, - int *compr_type); -int ubifs_decompress(const void *buf, int len, void *out, int *out_len, - int compr_type); +void ubifs_compress(const struct ubifs_info *c, const void *in_buf, int in_len, + void *out_buf, int *out_len, int *compr_type); +int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len, + void *out, int *out_len, int compr_type); #include "debug.h" #include "misc.h" diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index a92be24..96f3448 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -108,7 +108,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, .dirtied_ino_d = ALIGN(host_ui->data_len, 8) }; if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE) { - ubifs_err("inode %lu already has too many xattrs (%d), cannot create more", + ubifs_err(c, "inode %lu already has too many xattrs (%d), cannot create more", host->i_ino, host_ui->xattr_cnt); return -ENOSPC; } @@ -120,7 +120,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, */ names_len = host_ui->xattr_names + host_ui->xattr_cnt + nm->len + 1; if (names_len > XATTR_LIST_MAX) { - ubifs_err("cannot add one more xattr name to inode %lu, total names length would become %d, max. is %d", + ubifs_err(c, "cannot add one more xattr name to inode %lu, total names length would become %d, max. 
is %d", host->i_ino, names_len, XATTR_LIST_MAX); return -ENOSPC; } @@ -288,13 +288,13 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum) inode = ubifs_iget(c->vfs_sb, inum); if (IS_ERR(inode)) { - ubifs_err("dead extended attribute entry, error %d", + ubifs_err(c, "dead extended attribute entry, error %d", (int)PTR_ERR(inode)); return inode; } if (ubifs_inode(inode)->xattr) return inode; - ubifs_err("corrupt extended attribute entry"); + ubifs_err(c, "corrupt extended attribute entry"); iput(inode); return ERR_PTR(-EINVAL); } @@ -364,15 +364,15 @@ int ubifs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", - name, dentry->d_inode->i_ino, dentry, size); + name, d_inode(dentry)->i_ino, dentry, size); - return setxattr(dentry->d_inode, name, value, size, flags); + return setxattr(d_inode(dentry), name, value, size, flags); } ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, size_t size) { - struct inode *inode, *host = dentry->d_inode; + struct inode *inode, *host = d_inode(dentry); struct ubifs_info *c = host->i_sb->s_fs_info; struct qstr nm = QSTR_INIT(name, strlen(name)); struct ubifs_inode *ui; @@ -412,7 +412,7 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, if (buf) { /* If @buf is %NULL we are supposed to return the length */ if (ui->data_len > size) { - ubifs_err("buffer size %zd, xattr len %d", + ubifs_err(c, "buffer size %zd, xattr len %d", size, ui->data_len); err = -ERANGE; goto out_iput; @@ -432,7 +432,7 @@ out_unlock: ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size) { union ubifs_key key; - struct inode *host = dentry->d_inode; + struct inode *host = d_inode(dentry); struct ubifs_info *c = host->i_sb->s_fs_info; struct ubifs_inode *host_ui = ubifs_inode(host); struct ubifs_dent_node *xent, *pxent = NULL; @@ -485,7 +485,7 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size) kfree(pxent); if (err != -ENOENT) { - ubifs_err("cannot find next direntry, error %d", err); + ubifs_err(c, "cannot find next direntry, error %d", err); return err; } @@ -535,7 +535,7 @@ out_cancel: int ubifs_removexattr(struct dentry *dentry, const char *name) { - struct inode *inode, *host = dentry->d_inode; + struct inode *inode, *host = d_inode(dentry); struct ubifs_info *c = host->i_sb->s_fs_info; struct qstr nm = QSTR_INIT(name, strlen(name)); struct ubifs_dent_node *xent; @@ -657,8 +657,10 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, &init_xattrs, 0); mutex_unlock(&inode->i_mutex); - if (err) - ubifs_err("cannot initialize security for inode %lu, error %d", + if (err) { + struct ubifs_info *c = dentry->i_sb->s_fs_info; + ubifs_err(c, "cannot initialize security for inode %lu, error %d", inode->i_ino, err); + } return err; } diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 1ba2baa..6d6a96b 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -21,7 +21,6 @@ #include "udfdecl.h" -#include <linux/buffer_head.h> #include <linux/bitops.h> #include "udf_i.h" @@ -63,15 +62,14 @@ static int __load_block_bitmap(struct super_block *sb, block_group, nr_groups); } - if (bitmap->s_block_bitmap[block_group]) { + if (bitmap->s_block_bitmap[block_group]) return block_group; - } else { - retval = read_block_bitmap(sb, bitmap, block_group, - block_group); - if (retval < 0) - return retval; - return block_group; - } + + retval = read_block_bitmap(sb, bitmap, block_group, 
block_group); + if (retval < 0) + return retval; + + return block_group; } static inline int load_block_bitmap(struct super_block *sb, @@ -358,7 +356,6 @@ static void udf_table_free_blocks(struct super_block *sb, struct kernel_lb_addr eloc; struct extent_position oepos, epos; int8_t etype; - int i; struct udf_inode_info *iinfo; mutex_lock(&sbi->s_alloc_mutex); @@ -425,7 +422,6 @@ static void udf_table_free_blocks(struct super_block *sb, } if (epos.bh != oepos.bh) { - i = -1; oepos.block = epos.block; brelse(oepos.bh); get_bh(epos.bh); @@ -762,7 +758,7 @@ inline int udf_prealloc_blocks(struct super_block *sb, uint32_t block_count) { struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; - sector_t allocated; + int allocated; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) allocated = udf_bitmap_prealloc_blocks(sb, diff --git a/fs/udf/dir.c b/fs/udf/dir.c index 05e90ed..541a12b 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -30,7 +30,6 @@ #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/buffer_head.h> #include "udf_i.h" #include "udf_sb.h" diff --git a/fs/udf/directory.c b/fs/udf/directory.c index 3e44f57..c763fda 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c @@ -16,7 +16,6 @@ #include <linux/fs.h> #include <linux/string.h> -#include <linux/buffer_head.h> struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, struct udf_fileident_bh *fibh, diff --git a/fs/udf/file.c b/fs/udf/file.c index 08f3555..7a95b8f 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -33,8 +33,7 @@ #include <linux/capability.h> #include <linux/errno.h> #include <linux/pagemap.h> -#include <linux/buffer_head.h> -#include <linux/aio.h> +#include <linux/uio.h> #include "udf_i.h" #include "udf_sb.h" @@ -100,8 +99,7 @@ static int udf_adinicb_write_begin(struct file *file, return 0; } -static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, +static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { /* Fallback to buffered I/O. 
*/ @@ -121,21 +119,21 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ssize_t retval; struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); - int err, pos; - size_t count = iocb->ki_nbytes; struct udf_inode_info *iinfo = UDF_I(inode); + int err; mutex_lock(&inode->i_mutex); + + retval = generic_write_checks(iocb, from); + if (retval <= 0) + goto out; + down_write(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { - if (file->f_flags & O_APPEND) - pos = inode->i_size; - else - pos = iocb->ki_pos; + loff_t end = iocb->ki_pos + iov_iter_count(from); if (inode->i_sb->s_blocksize < - (udf_file_entry_alloc_offset(inode) + - pos + count)) { + (udf_file_entry_alloc_offset(inode) + end)) { err = udf_expand_file_adinicb(inode); if (err) { mutex_unlock(&inode->i_mutex); @@ -143,16 +141,14 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from) return err; } } else { - if (pos + count > inode->i_size) - iinfo->i_lenAlloc = pos + count; - else - iinfo->i_lenAlloc = inode->i_size; + iinfo->i_lenAlloc = max(end, inode->i_size); up_write(&iinfo->i_data_sem); } } else up_write(&iinfo->i_data_sem); retval = __generic_file_write_iter(iocb, from); +out: mutex_unlock(&inode->i_mutex); if (retval > 0) { @@ -240,12 +236,10 @@ static int udf_release_file(struct inode *inode, struct file *filp) } const struct file_operations udf_file_operations = { - .read = new_sync_read, .read_iter = generic_file_read_iter, .unlocked_ioctl = udf_ioctl, .open = generic_file_open, .mmap = generic_file_mmap, - .write = new_sync_write, .write_iter = udf_file_write_iter, .release = udf_release_file, .fsync = generic_file_fsync, @@ -255,7 +249,7 @@ const struct file_operations udf_file_operations = { static int udf_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); int error; error = inode_change_ok(inode, attr); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index a445d59..6afac3d 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -33,12 +33,11 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> -#include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/crc-itu-t.h> #include <linux/mpage.h> -#include <linux/aio.h> +#include <linux/uio.h> #include "udf_i.h" #include "udf_sb.h" @@ -215,8 +214,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping, return ret; } -static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, - struct iov_iter *iter, +static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; @@ -225,8 +223,8 @@ static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block); - if (unlikely(ret < 0 && (rw & WRITE))) + ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block); + if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE)) udf_write_failed(mapping, offset + count); return ret; } @@ -1637,7 +1635,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0)); if (!bh) { udf_debug("getblk failure\n"); - return -ENOMEM; + return -EIO; } lock_buffer(bh); diff --git a/fs/udf/misc.c b/fs/udf/misc.c index c175b4d..71d1c25 100644 --- a/fs/udf/misc.c +++ b/fs/udf/misc.c @@ -23,7 +23,6 @@ #include <linux/fs.h> 
#include <linux/string.h> -#include <linux/buffer_head.h> #include <linux/crc-itu-t.h> #include "udf_i.h" diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 33b246b..5c03f0d 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -27,7 +27,6 @@ #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/buffer_head.h> #include <linux/sched.h> #include <linux/crc-itu-t.h> #include <linux/exportfs.h> @@ -552,7 +551,7 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi, static int udf_add_nondir(struct dentry *dentry, struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; @@ -569,8 +568,8 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode) *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); - if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) - mark_inode_dirty(dir); + dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); + mark_inode_dirty(dir); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); @@ -683,6 +682,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY; udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); inc_nlink(dir); + dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); mark_inode_dirty(dir); unlock_new_inode(inode); d_instantiate(dentry, inode); @@ -767,7 +767,7 @@ static int empty_dir(struct inode *dir) static int udf_rmdir(struct inode *dir, struct dentry *dentry) { int retval; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct udf_fileident_bh fibh; struct fileIdentDesc *fi, cfi; struct kernel_lb_addr tloc; @@ -809,7 +809,7 @@ out: static int udf_unlink(struct inode *dir, struct dentry *dentry) { int retval; - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct udf_fileident_bh fibh; struct fileIdentDesc *fi; struct fileIdentDesc cfi; @@ -999,7 +999,7 @@ out_no_entry: static int udf_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; @@ -1024,6 +1024,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, inc_nlink(inode); inode->i_ctime = current_fs_time(inode->i_sb); mark_inode_dirty(inode); + dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); + mark_inode_dirty(dir); ihold(inode); d_instantiate(dentry, inode); @@ -1036,8 +1038,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct udf_fileident_bh ofibh, nfibh; struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL; struct fileIdentDesc ocfi, ncfi; @@ -1127,7 +1129,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, inode_dec_link_count(new_inode); } old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb); + 
new_dir->i_ctime = new_dir->i_mtime = current_fs_time(new_dir->i_sb); mark_inode_dirty(old_dir); + mark_inode_dirty(new_dir); if (dir_fi) { dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); @@ -1175,7 +1179,7 @@ static struct dentry *udf_get_parent(struct dentry *child) struct fileIdentDesc cfi; struct udf_fileident_bh fibh; - if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi)) + if (!udf_find_entry(d_inode(child), &dotdot, &fibh, &cfi)) return ERR_PTR(-EACCES); if (fibh.sbh != fibh.ebh) @@ -1183,7 +1187,7 @@ static struct dentry *udf_get_parent(struct dentry *child) brelse(fibh.sbh); tloc = lelb_to_cpu(cfi.icb.extLocation); - inode = udf_iget(child->d_inode->i_sb, &tloc); + inode = udf_iget(d_inode(child)->i_sb, &tloc); if (IS_ERR(inode)) return ERR_CAST(inode); diff --git a/fs/udf/partition.c b/fs/udf/partition.c index d6caf01..5f861ed2 100644 --- a/fs/udf/partition.c +++ b/fs/udf/partition.c @@ -24,7 +24,6 @@ #include <linux/fs.h> #include <linux/string.h> -#include <linux/buffer_head.h> #include <linux/mutex.h> uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, diff --git a/fs/udf/super.c b/fs/udf/super.c index f169411..6299f34 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -48,7 +48,6 @@ #include <linux/stat.h> #include <linux/cdrom.h> #include <linux/nls.h> -#include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/vmalloc.h> #include <linux/errno.h> diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index ac10ca9..8dfbc40 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -27,7 +27,6 @@ #include <linux/mm.h> #include <linux/stat.h> #include <linux/pagemap.h> -#include <linux/buffer_head.h> #include "udf_i.h" static int udf_pc_to_char(struct super_block *sb, unsigned char *from, diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index 8a9657d..42b8c57 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c @@ -22,7 +22,6 @@ #include "udfdecl.h" #include <linux/fs.h> #include <linux/mm.h> -#include <linux/buffer_head.h> #include "udf_i.h" #include "udf_sb.h" diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 0ecc2ce..1bfe8ca 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -311,7 +311,7 @@ found: */ int ufs_add_link(struct dentry *dentry, struct inode *inode) { - struct inode *dir = dentry->d_parent->d_inode; + struct inode *dir = d_inode(dentry->d_parent); const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct super_block *sb = dir->i_sb; diff --git a/fs/ufs/file.c b/fs/ufs/file.c index c84ec01..042ddbf 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c @@ -35,9 +35,7 @@ const struct file_operations ufs_file_operations = { .llseek = generic_file_llseek, - .read = new_sync_read, .read_iter = generic_file_read_iter, - .write = new_sync_write, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .open = generic_file_open, diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index fd65deb..e491a93 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -165,7 +165,7 @@ out_fail: static int ufs_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); int error; lock_ufs(dir->i_sb); @@ -222,7 +222,7 @@ out_fail: static int ufs_unlink(struct inode *dir, struct dentry *dentry) { - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); struct ufs_dir_entry *de; struct page *page; int err = -ENOENT; @@ -244,7 +244,7 @@ out: static int ufs_rmdir (struct inode * dir, struct 
dentry *dentry) { - struct inode * inode = dentry->d_inode; + struct inode * inode = d_inode(dentry); int err= -ENOTEMPTY; lock_ufs(dir->i_sb); @@ -263,8 +263,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - struct inode *old_inode = old_dentry->d_inode; - struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); struct page *dir_page = NULL; struct ufs_dir_entry * dir_de = NULL; struct page *old_page; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 8092d37..b3bc3e7 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -144,10 +144,10 @@ static struct dentry *ufs_get_parent(struct dentry *child) struct qstr dot_dot = QSTR_INIT("..", 2); ino_t ino; - ino = ufs_inode_by_name(child->d_inode, &dot_dot); + ino = ufs_inode_by_name(d_inode(child), &dot_dot); if (!ino) return ERR_PTR(-ENOENT); - return d_obtain_alias(ufs_iget(child->d_inode->i_sb, ino)); + return d_obtain_alias(ufs_iget(d_inode(child)->i_sb, ino)); } static const struct export_operations ufs_export_ops = { diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c index d283628..5b537e2 100644 --- a/fs/ufs/symlink.c +++ b/fs/ufs/symlink.c @@ -34,7 +34,7 @@ static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct ufs_inode_info *p = UFS_I(dentry->d_inode); + struct ufs_inode_info *p = UFS_I(d_inode(dentry)); nd_set_link(nd, (char*)p->i_u1.i_symlink); return NULL; } diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index f04f89f..2115470 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c @@ -492,7 +492,7 @@ out: int ufs_setattr(struct dentry *dentry, struct iattr *attr) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); unsigned int ia_valid = attr->ia_valid; int error; diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index a6fbf44..516162b 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -260,6 +260,7 @@ xfs_alloc_fix_len( rlen = rlen - (k - args->mod); else rlen = rlen - args->prod + (args->mod - k); + /* casts to (int) catch length underflows */ if ((int)rlen < (int)args->minlen) return; ASSERT(rlen >= args->minlen && rlen <= args->maxlen); @@ -286,7 +287,8 @@ xfs_alloc_fix_minleft( if (diff >= 0) return 1; args->len += diff; /* shrink the allocated space */ - if (args->len >= args->minlen) + /* casts to (int) catch length underflows */ + if ((int)args->len >= (int)args->minlen) return 1; args->agbno = NULLAGBLOCK; return 0; @@ -315,6 +317,9 @@ xfs_alloc_fixup_trees( xfs_agblock_t nfbno2; /* second new free startblock */ xfs_extlen_t nflen1=0; /* first new free length */ xfs_extlen_t nflen2=0; /* second new free length */ + struct xfs_mount *mp; + + mp = cnt_cur->bc_mp; /* * Look up the record in the by-size tree if necessary. @@ -323,13 +328,13 @@ xfs_alloc_fixup_trees( #ifdef DEBUG if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i))) return error; - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, i == 1 && nfbno1 == fbno && nflen1 == flen); #endif } else { if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); } /* * Look up the record in the by-block tree if necessary. 
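The new "casts to (int) catch length underflows" comments in xfs_alloc_fix_len() and xfs_alloc_fix_minleft() above document an existing idiom: the length fields are unsigned, so a subtraction that logically goes negative wraps to a huge value and would still pass an unsigned ">=" comparison. A minimal standalone sketch of that idiom follows; the extlen_t type and fix_len() helper are illustrative stand-ins, not the XFS definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t extlen_t;	/* stand-in for an unsigned extent-length type */

/* Shrink an allocation length; reject the result if it underflowed below minlen. */
static int fix_len(extlen_t *len, extlen_t shrink, extlen_t minlen)
{
	extlen_t rlen = *len - shrink;	/* wraps around if shrink > *len */

	/*
	 * An unsigned compare would accept the wrapped value, because a huge
	 * rlen is still ">= minlen".  Casting both sides to int turns a
	 * wrapped result negative, so the check catches the underflow.
	 */
	if ((int)rlen < (int)minlen)
		return -1;
	*len = rlen;
	return 0;
}

int main(void)
{
	extlen_t len = 4;

	assert(fix_len(&len, 8, 1) == -1);		/* would underflow; rejected */
	len = 16;
	assert(fix_len(&len, 8, 1) == 0 && len == 8);	/* normal shrink */
	printf("ok\n");
	return 0;
}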
@@ -338,13 +343,13 @@ xfs_alloc_fixup_trees( #ifdef DEBUG if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i))) return error; - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, i == 1 && nfbno1 == fbno && nflen1 == flen); #endif } else { if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); } #ifdef DEBUG @@ -355,7 +360,7 @@ xfs_alloc_fixup_trees( bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]); cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]); - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, bnoblock->bb_numrecs == cntblock->bb_numrecs); } #endif @@ -386,25 +391,25 @@ xfs_alloc_fixup_trees( */ if ((error = xfs_btree_delete(cnt_cur, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); /* * Add new by-size btree entry(s). */ if (nfbno1 != NULLAGBLOCK) { if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 0); + XFS_WANT_CORRUPTED_RETURN(mp, i == 0); if ((error = xfs_btree_insert(cnt_cur, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); } if (nfbno2 != NULLAGBLOCK) { if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 0); + XFS_WANT_CORRUPTED_RETURN(mp, i == 0); if ((error = xfs_btree_insert(cnt_cur, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); } /* * Fix up the by-block btree entry(s). @@ -415,7 +420,7 @@ xfs_alloc_fixup_trees( */ if ((error = xfs_btree_delete(bno_cur, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); } else { /* * Update the by-block entry to start later|be shorter. 
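Most of the xfs_alloc.c churn in this patch threads the mount pointer into the XFS_WANT_CORRUPTED_RETURN/XFS_WANT_CORRUPTED_GOTO call sites, so a failed on-disk sanity check can be attributed to a specific filesystem instance rather than reported as a bare assertion. The sketch below shows only the general check-and-bail shape of such a macro; mount_ctx, WANT_CORRUPTED_GOTO and the EFSCORRUPTED value are made-up illustrations, not the kernel's definitions.

#include <stdio.h>

#define EFSCORRUPTED	117	/* illustrative value only */

struct mount_ctx {
	const char *fsname;	/* stand-in for the real mount structure */
};

/*
 * Check a condition derived from on-disk metadata; on failure, report
 * which filesystem instance is corrupt and jump to the error label.
 */
#define WANT_CORRUPTED_GOTO(mp, cond, label)				\
	do {								\
		if (!(cond)) {						\
			fprintf(stderr, "%s: corruption detected at %s:%d\n", \
				(mp)->fsname, __FILE__, __LINE__);	\
			error = -EFSCORRUPTED;				\
			goto label;					\
		}							\
	} while (0)

static int demo(struct mount_ctx *mp, int stat)
{
	int error = 0;

	/* e.g. "stat" would be the btree lookup's found/not-found result */
	WANT_CORRUPTED_GOTO(mp, stat == 1, out_bad);
	return 0;
out_bad:
	return error;
}

int main(void)
{
	struct mount_ctx mp = { .fsname = "xfs-demo" };

	printf("good: %d, bad: %d\n", demo(&mp, 1), demo(&mp, 0));
	return 0;
}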
@@ -429,10 +434,10 @@ xfs_alloc_fixup_trees( */ if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 0); + XFS_WANT_CORRUPTED_RETURN(mp, i == 0); if ((error = xfs_btree_insert(bno_cur, &i))) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); } return 0; } @@ -682,7 +687,7 @@ xfs_alloc_ag_vextent_exact( error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); ASSERT(fbno <= args->agbno); /* @@ -783,7 +788,7 @@ xfs_alloc_find_best_extent( error = xfs_alloc_get_rec(*scur, sbno, slen, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena); /* @@ -946,7 +951,7 @@ restart: if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); if (ltlen >= args->minlen) break; if ((error = xfs_btree_increment(cnt_cur, 0, &i))) @@ -966,7 +971,7 @@ restart: */ if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); xfs_alloc_compute_aligned(args, ltbno, ltlen, <bnoa, <lena); if (ltlena < args->minlen) @@ -999,7 +1004,7 @@ restart: cnt_cur->bc_ptrs[0] = besti; if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); args->len = blen; if (!xfs_alloc_fix_minleft(args)) { @@ -1088,7 +1093,7 @@ restart: if (bno_cur_lt) { if ((error = xfs_alloc_get_rec(bno_cur_lt, <bno, <len, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); xfs_alloc_compute_aligned(args, ltbno, ltlen, <bnoa, <lena); if (ltlena >= args->minlen) @@ -1104,7 +1109,7 @@ restart: if (bno_cur_gt) { if ((error = xfs_alloc_get_rec(bno_cur_gt, >bno, >len, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); xfs_alloc_compute_aligned(args, gtbno, gtlen, >bnoa, >lena); if (gtlena >= args->minlen) @@ -1303,7 +1308,7 @@ restart: error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen); @@ -1342,7 +1347,7 @@ restart: * This can't happen in the second case above. 
*/ rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); - XFS_WANT_CORRUPTED_GOTO(rlen == 0 || + XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 || (rlen <= flen && rbno + rlen <= fbno + flen), error0); if (rlen < args->maxlen) { xfs_agblock_t bestfbno; @@ -1362,13 +1367,13 @@ restart: if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); if (flen < bestrlen) break; xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen); rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); - XFS_WANT_CORRUPTED_GOTO(rlen == 0 || + XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 || (rlen <= flen && rbno + rlen <= fbno + flen), error0); if (rlen > bestrlen) { @@ -1383,7 +1388,7 @@ restart: if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); rlen = bestrlen; rbno = bestrbno; flen = bestflen; @@ -1408,7 +1413,7 @@ restart: if (!xfs_alloc_fix_minleft(args)) goto out_nominleft; rlen = args->len; - XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0); /* * Allocate and initialize a cursor for the by-block tree. */ @@ -1422,7 +1427,7 @@ restart: cnt_cur = bno_cur = NULL; args->len = rlen; args->agbno = rbno; - XFS_WANT_CORRUPTED_GOTO( + XFS_WANT_CORRUPTED_GOTO(args->mp, args->agbno + args->len <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), error0); @@ -1467,7 +1472,7 @@ xfs_alloc_ag_vextent_small( if (i) { if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); } /* * Nothing in the btree, try the freelist. Make sure @@ -1493,7 +1498,7 @@ xfs_alloc_ag_vextent_small( } args->len = 1; args->agbno = fbno; - XFS_WANT_CORRUPTED_GOTO( + XFS_WANT_CORRUPTED_GOTO(args->mp, args->agbno + args->len <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), error0); @@ -1579,7 +1584,7 @@ xfs_free_ag_extent( */ if ((error = xfs_alloc_get_rec(bno_cur, <bno, <len, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * It's not contiguous, though. */ @@ -1591,7 +1596,8 @@ xfs_free_ag_extent( * space was invalid, it's (partly) already free. * Very bad. */ - XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0); + XFS_WANT_CORRUPTED_GOTO(mp, + ltbno + ltlen <= bno, error0); } } /* @@ -1606,7 +1612,7 @@ xfs_free_ag_extent( */ if ((error = xfs_alloc_get_rec(bno_cur, >bno, >len, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * It's not contiguous, though. */ @@ -1618,7 +1624,7 @@ xfs_free_ag_extent( * space was invalid, it's (partly) already free. * Very bad. */ - XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0); + XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0); } } /* @@ -1635,31 +1641,31 @@ xfs_free_ag_extent( */ if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * Delete the old by-size entry on the right. 
*/ if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * Delete the old by-block entry for the right block. */ if ((error = xfs_btree_delete(bno_cur, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * Move the by-block cursor back to the left neighbor. */ if ((error = xfs_btree_decrement(bno_cur, 0, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); #ifdef DEBUG /* * Check that this is the right record: delete didn't @@ -1672,7 +1678,7 @@ xfs_free_ag_extent( if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO( + XFS_WANT_CORRUPTED_GOTO(mp, i == 1 && xxbno == ltbno && xxlen == ltlen, error0); } @@ -1695,17 +1701,17 @@ xfs_free_ag_extent( */ if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * Back up the by-block cursor to the left neighbor, and * update its length. */ if ((error = xfs_btree_decrement(bno_cur, 0, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); nbno = ltbno; nlen = len + ltlen; if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) @@ -1721,10 +1727,10 @@ xfs_free_ag_extent( */ if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * Update the starting block and length of the right * neighbor in the by-block tree. @@ -1743,7 +1749,7 @@ xfs_free_ag_extent( nlen = len; if ((error = xfs_btree_insert(bno_cur, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); } xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); bno_cur = NULL; @@ -1752,10 +1758,10 @@ xfs_free_ag_extent( */ if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 0, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0); if ((error = xfs_btree_insert(cnt_cur, &i))) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); cnt_cur = NULL; diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 15105db..04e79d5 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -86,8 +86,83 @@ STATIC void xfs_attr3_leaf_moveents(struct xfs_da_args *args, int move_count); STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index); +/* + * attr3 block 'firstused' conversion helpers. + * + * firstused refers to the offset of the first used byte of the nameval region + * of an attr leaf block. The region starts at the tail of the block and expands + * backwards towards the middle. 
As such, firstused is initialized to the block + * size for an empty leaf block and is reduced from there. + * + * The attr3 block size is pegged to the fsb size and the maximum fsb is 64k. + * The in-core firstused field is 32-bit and thus supports the maximum fsb size. + * The on-disk field is only 16-bit, however, and overflows at 64k. Since this + * only occurs at exactly 64k, we use zero as a magic on-disk value to represent + * the attr block size. The following helpers manage the conversion between the + * in-core and on-disk formats. + */ + +static void +xfs_attr3_leaf_firstused_from_disk( + struct xfs_da_geometry *geo, + struct xfs_attr3_icleaf_hdr *to, + struct xfs_attr_leafblock *from) +{ + struct xfs_attr3_leaf_hdr *hdr3; + + if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) { + hdr3 = (struct xfs_attr3_leaf_hdr *) from; + to->firstused = be16_to_cpu(hdr3->firstused); + } else { + to->firstused = be16_to_cpu(from->hdr.firstused); + } + + /* + * Convert from the magic fsb size value to actual blocksize. This + * should only occur for empty blocks when the block size overflows + * 16-bits. + */ + if (to->firstused == XFS_ATTR3_LEAF_NULLOFF) { + ASSERT(!to->count && !to->usedbytes); + ASSERT(geo->blksize > USHRT_MAX); + to->firstused = geo->blksize; + } +} + +static void +xfs_attr3_leaf_firstused_to_disk( + struct xfs_da_geometry *geo, + struct xfs_attr_leafblock *to, + struct xfs_attr3_icleaf_hdr *from) +{ + struct xfs_attr3_leaf_hdr *hdr3; + uint32_t firstused; + + /* magic value should only be seen on disk */ + ASSERT(from->firstused != XFS_ATTR3_LEAF_NULLOFF); + + /* + * Scale down the 32-bit in-core firstused value to the 16-bit on-disk + * value. This only overflows at the max supported value of 64k. Use the + * magic on-disk value to represent block size in this case. 
+ */ + firstused = from->firstused; + if (firstused > USHRT_MAX) { + ASSERT(from->firstused == geo->blksize); + firstused = XFS_ATTR3_LEAF_NULLOFF; + } + + if (from->magic == XFS_ATTR3_LEAF_MAGIC) { + hdr3 = (struct xfs_attr3_leaf_hdr *) to; + hdr3->firstused = cpu_to_be16(firstused); + } else { + to->hdr.firstused = cpu_to_be16(firstused); + } +} + void xfs_attr3_leaf_hdr_from_disk( + struct xfs_da_geometry *geo, struct xfs_attr3_icleaf_hdr *to, struct xfs_attr_leafblock *from) { @@ -104,7 +179,7 @@ xfs_attr3_leaf_hdr_from_disk( to->magic = be16_to_cpu(hdr3->info.hdr.magic); to->count = be16_to_cpu(hdr3->count); to->usedbytes = be16_to_cpu(hdr3->usedbytes); - to->firstused = be16_to_cpu(hdr3->firstused); + xfs_attr3_leaf_firstused_from_disk(geo, to, from); to->holes = hdr3->holes; for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { @@ -118,7 +193,7 @@ xfs_attr3_leaf_hdr_from_disk( to->magic = be16_to_cpu(from->hdr.info.magic); to->count = be16_to_cpu(from->hdr.count); to->usedbytes = be16_to_cpu(from->hdr.usedbytes); - to->firstused = be16_to_cpu(from->hdr.firstused); + xfs_attr3_leaf_firstused_from_disk(geo, to, from); to->holes = from->hdr.holes; for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { @@ -129,10 +204,11 @@ xfs_attr3_leaf_hdr_from_disk( void xfs_attr3_leaf_hdr_to_disk( + struct xfs_da_geometry *geo, struct xfs_attr_leafblock *to, struct xfs_attr3_icleaf_hdr *from) { - int i; + int i; ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC || from->magic == XFS_ATTR3_LEAF_MAGIC); @@ -145,7 +221,7 @@ xfs_attr3_leaf_hdr_to_disk( hdr3->info.hdr.magic = cpu_to_be16(from->magic); hdr3->count = cpu_to_be16(from->count); hdr3->usedbytes = cpu_to_be16(from->usedbytes); - hdr3->firstused = cpu_to_be16(from->firstused); + xfs_attr3_leaf_firstused_to_disk(geo, to, from); hdr3->holes = from->holes; hdr3->pad1 = 0; @@ -160,7 +236,7 @@ xfs_attr3_leaf_hdr_to_disk( to->hdr.info.magic = cpu_to_be16(from->magic); to->hdr.count = cpu_to_be16(from->count); to->hdr.usedbytes = cpu_to_be16(from->usedbytes); - to->hdr.firstused = cpu_to_be16(from->firstused); + xfs_attr3_leaf_firstused_to_disk(geo, to, from); to->hdr.holes = from->holes; to->hdr.pad1 = 0; @@ -178,7 +254,7 @@ xfs_attr3_leaf_verify( struct xfs_attr_leafblock *leaf = bp->b_addr; struct xfs_attr3_icleaf_hdr ichdr; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); if (xfs_sb_version_hascrc(&mp->m_sb)) { struct xfs_da3_node_hdr *hdr3 = bp->b_addr; @@ -757,9 +833,10 @@ xfs_attr_shortform_allfit( struct xfs_attr3_icleaf_hdr leafhdr; int bytes; int i; + struct xfs_mount *mp = bp->b_target->bt_mount; leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); entry = xfs_attr3_leaf_entryp(leaf); bytes = sizeof(struct xfs_attr_sf_hdr); @@ -812,7 +889,7 @@ xfs_attr3_leaf_to_shortform( memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); leaf = (xfs_attr_leafblock_t *)tmpbuffer; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); entry = xfs_attr3_leaf_entryp(leaf); /* XXX (dgc): buffer is about to be marked stale - why zero it? 
*/ @@ -923,7 +1000,7 @@ xfs_attr3_leaf_to_node( btree = dp->d_ops->node_tree_p(node); leaf = bp2->b_addr; - xfs_attr3_leaf_hdr_from_disk(&icleafhdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); /* both on-disk, don't endian-flip twice */ @@ -988,7 +1065,7 @@ xfs_attr3_leaf_create( } ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base; - xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr); + xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr); xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1); *bpp = bp; @@ -1073,7 +1150,7 @@ xfs_attr3_leaf_add( trace_xfs_attr_leaf_add(args); leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); ASSERT(args->index >= 0 && args->index <= ichdr.count); entsize = xfs_attr_leaf_newentsize(args, NULL); @@ -1126,7 +1203,7 @@ xfs_attr3_leaf_add( tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0); out_log_hdr: - xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr); + xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr); xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, &leaf->hdr, xfs_attr3_leaf_hdr_size(leaf))); @@ -1294,7 +1371,7 @@ xfs_attr3_leaf_compact( ichdr_dst->freemap[0].base; /* write the header back to initialise the underlying buffer */ - xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst); + xfs_attr3_leaf_hdr_to_disk(args->geo, leaf_dst, ichdr_dst); /* * Copy all entry's in the same (sorted) order, @@ -1344,9 +1421,10 @@ xfs_attr_leaf_order( { struct xfs_attr3_icleaf_hdr ichdr1; struct xfs_attr3_icleaf_hdr ichdr2; + struct xfs_mount *mp = leaf1_bp->b_target->bt_mount; - xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1_bp->b_addr); - xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2_bp->b_addr); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr1, leaf1_bp->b_addr); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr2, leaf2_bp->b_addr); return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2); } @@ -1388,8 +1466,8 @@ xfs_attr3_leaf_rebalance( ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); leaf1 = blk1->bp->b_addr; leaf2 = blk2->bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1); - xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2); + xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr1, leaf1); + xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, leaf2); ASSERT(ichdr2.count == 0); args = state->args; @@ -1490,8 +1568,8 @@ xfs_attr3_leaf_rebalance( ichdr1.count, count); } - xfs_attr3_leaf_hdr_to_disk(leaf1, &ichdr1); - xfs_attr3_leaf_hdr_to_disk(leaf2, &ichdr2); + xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf1, &ichdr1); + xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf2, &ichdr2); xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1); xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1); @@ -1684,7 +1762,7 @@ xfs_attr3_leaf_toosmall( */ blk = &state->path.blk[ state->path.active-1 ]; leaf = blk->bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr, leaf); bytes = xfs_attr3_leaf_hdr_size(leaf) + ichdr.count * sizeof(xfs_attr_leaf_entry_t) + ichdr.usedbytes; @@ -1740,7 +1818,7 @@ xfs_attr3_leaf_toosmall( if (error) return error; - xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr); + xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, bp->b_addr); bytes = state->args->geo->blksize - (state->args->geo->blksize >> 2) - @@ -1805,7 +1883,7 @@ xfs_attr3_leaf_remove( trace_xfs_attr_leaf_remove(args); leaf = bp->b_addr; - 
xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); ASSERT(ichdr.count > 0 && ichdr.count < args->geo->blksize / 8); ASSERT(args->index >= 0 && args->index < ichdr.count); @@ -1918,12 +1996,11 @@ xfs_attr3_leaf_remove( tmp = be16_to_cpu(entry->nameidx); } ichdr.firstused = tmp; - if (!ichdr.firstused) - ichdr.firstused = tmp - XFS_ATTR_LEAF_NAME_ALIGN; + ASSERT(ichdr.firstused != 0); } else { ichdr.holes = 1; /* mark as needing compaction */ } - xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr); + xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr); xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, &leaf->hdr, xfs_attr3_leaf_hdr_size(leaf))); @@ -1957,8 +2034,8 @@ xfs_attr3_leaf_unbalance( drop_leaf = drop_blk->bp->b_addr; save_leaf = save_blk->bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&drophdr, drop_leaf); - xfs_attr3_leaf_hdr_from_disk(&savehdr, save_leaf); + xfs_attr3_leaf_hdr_from_disk(state->args->geo, &drophdr, drop_leaf); + xfs_attr3_leaf_hdr_from_disk(state->args->geo, &savehdr, save_leaf); entry = xfs_attr3_leaf_entryp(drop_leaf); /* @@ -2012,7 +2089,7 @@ xfs_attr3_leaf_unbalance( tmphdr.firstused = state->args->geo->blksize; /* write the header to the temp buffer to initialise it */ - xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr); + xfs_attr3_leaf_hdr_to_disk(state->args->geo, tmp_leaf, &tmphdr); if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, drop_blk->bp, &drophdr)) { @@ -2039,7 +2116,7 @@ xfs_attr3_leaf_unbalance( kmem_free(tmp_leaf); } - xfs_attr3_leaf_hdr_to_disk(save_leaf, &savehdr); + xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr); xfs_trans_log_buf(state->args->trans, save_blk->bp, 0, state->args->geo->blksize - 1); @@ -2085,7 +2162,7 @@ xfs_attr3_leaf_lookup_int( trace_xfs_attr_leaf_lookup(args); leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); ASSERT(ichdr.count < args->geo->blksize / 8); @@ -2190,7 +2267,7 @@ xfs_attr3_leaf_getvalue( int valuelen; leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); ASSERT(ichdr.count < args->geo->blksize / 8); ASSERT(args->index < ichdr.count); @@ -2391,8 +2468,9 @@ xfs_attr_leaf_lasthash( { struct xfs_attr3_icleaf_hdr ichdr; struct xfs_attr_leaf_entry *entries; + struct xfs_mount *mp = bp->b_target->bt_mount; - xfs_attr3_leaf_hdr_from_disk(&ichdr, bp->b_addr); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, bp->b_addr); entries = xfs_attr3_leaf_entryp(bp->b_addr); if (count) *count = ichdr.count; @@ -2486,7 +2564,7 @@ xfs_attr3_leaf_clearflag( ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); #ifdef DEBUG - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); ASSERT(args->index < ichdr.count); ASSERT(args->index >= 0); @@ -2550,7 +2628,7 @@ xfs_attr3_leaf_setflag( leaf = bp->b_addr; #ifdef DEBUG - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); ASSERT(args->index < ichdr.count); ASSERT(args->index >= 0); #endif @@ -2629,11 +2707,11 @@ xfs_attr3_leaf_flipflags( entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2]; #ifdef DEBUG - xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr1, leaf1); ASSERT(args->index < ichdr1.count); ASSERT(args->index >= 0); - xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2); + xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr2, 
leaf2); ASSERT(args->index2 < ichdr2.count); ASSERT(args->index2 >= 0); diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h index e2929da..025c4b8 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.h +++ b/fs/xfs/libxfs/xfs_attr_leaf.h @@ -100,9 +100,11 @@ int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int *local); int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp, xfs_dablk_t bno, xfs_daddr_t mappedbno, struct xfs_buf **bpp); -void xfs_attr3_leaf_hdr_from_disk(struct xfs_attr3_icleaf_hdr *to, +void xfs_attr3_leaf_hdr_from_disk(struct xfs_da_geometry *geo, + struct xfs_attr3_icleaf_hdr *to, struct xfs_attr_leafblock *from); -void xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to, +void xfs_attr3_leaf_hdr_to_disk(struct xfs_da_geometry *geo, + struct xfs_attr_leafblock *to, struct xfs_attr3_icleaf_hdr *from); #endif /* __XFS_ATTR_LEAF_H__ */ diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 61ec015..aeffeaa 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -244,30 +244,6 @@ xfs_bmap_forkoff_reset( } } -/* - * Debug/sanity checking code - */ - -STATIC int -xfs_bmap_sanity_check( - struct xfs_mount *mp, - struct xfs_buf *bp, - int level) -{ - struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); - - if (block->bb_magic != cpu_to_be32(XFS_BMAP_CRC_MAGIC) && - block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC)) - return 0; - - if (be16_to_cpu(block->bb_level) != level || - be16_to_cpu(block->bb_numrecs) == 0 || - be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0]) - return 0; - - return 1; -} - #ifdef DEBUG STATIC struct xfs_buf * xfs_bmap_get_bp( @@ -410,9 +386,6 @@ xfs_bmap_check_leaf_extents( goto error_norelse; } block = XFS_BUF_TO_BLOCK(bp); - XFS_WANT_CORRUPTED_GOTO( - xfs_bmap_sanity_check(mp, bp, level), - error0); if (level == 0) break; @@ -424,7 +397,8 @@ xfs_bmap_check_leaf_extents( xfs_check_block(block, mp, 0, 0); pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); bno = be64_to_cpu(*pp); - XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0); + XFS_WANT_CORRUPTED_GOTO(mp, + XFS_FSB_SANITY_CHECK(mp, bno), error0); if (bp_release) { bp_release = 0; xfs_trans_brelse(NULL, bp); @@ -1029,7 +1003,7 @@ xfs_bmap_add_attrfork_btree( if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) goto error0; /* must be at least one entry */ - XFS_WANT_CORRUPTED_GOTO(stat == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0); if ((error = xfs_btree_new_iroot(cur, flags, &stat))) goto error0; if (stat == 0) { @@ -1311,14 +1285,12 @@ xfs_bmap_read_extents( if (error) return error; block = XFS_BUF_TO_BLOCK(bp); - XFS_WANT_CORRUPTED_GOTO( - xfs_bmap_sanity_check(mp, bp, level), - error0); if (level == 0) break; pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); bno = be64_to_cpu(*pp); - XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0); + XFS_WANT_CORRUPTED_GOTO(mp, + XFS_FSB_SANITY_CHECK(mp, bno), error0); xfs_trans_brelse(tp, bp); } /* @@ -1345,9 +1317,6 @@ xfs_bmap_read_extents( XFS_ERRLEVEL_LOW, ip->i_mount, block); goto error0; } - XFS_WANT_CORRUPTED_GOTO( - xfs_bmap_sanity_check(mp, bp, 0), - error0); /* * Read-ahead the next leaf block, if any. */ @@ -1755,7 +1724,9 @@ xfs_bmap_add_extent_delay_real( xfs_filblks_t temp=0; /* value for da_new calculations */ xfs_filblks_t temp2=0;/* value for da_new calculations */ int tmp_rval; /* partial logging flags */ + struct xfs_mount *mp; + mp = bma->tp ? 
bma->tp->t_mountp : NULL; ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK); ASSERT(bma->idx >= 0); @@ -1866,15 +1837,15 @@ xfs_bmap_add_extent_delay_real( RIGHT.br_blockcount, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_btree_delete(bma->cur, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_btree_decrement(bma->cur, 0, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + @@ -1907,7 +1878,7 @@ xfs_bmap_add_extent_delay_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + @@ -1938,7 +1909,7 @@ xfs_bmap_add_extent_delay_real( RIGHT.br_blockcount, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, PREV.br_startoff, new->br_startblock, PREV.br_blockcount + @@ -1968,12 +1939,12 @@ xfs_bmap_add_extent_delay_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 0, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } break; @@ -2001,7 +1972,7 @@ xfs_bmap_add_extent_delay_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + @@ -2038,12 +2009,12 @@ xfs_bmap_add_extent_delay_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 0, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { @@ -2084,7 +2055,7 @@ xfs_bmap_add_extent_delay_real( RIGHT.br_blockcount, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, new->br_startoff, new->br_startblock, new->br_blockcount + @@ -2122,12 +2093,12 @@ xfs_bmap_add_extent_delay_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 0, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { @@ -2191,12 +2162,12 @@ xfs_bmap_add_extent_delay_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 0, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { @@ -2212,9 +2183,8 @@ xfs_bmap_add_extent_delay_real( diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - (bma->cur ? 
bma->cur->bc_private.b.allocated : 0)); if (diff > 0) { - error = xfs_icsb_modify_counters(bma->ip->i_mount, - XFS_SBS_FDBLOCKS, - -((int64_t)diff), 0); + error = xfs_mod_fdblocks(bma->ip->i_mount, + -((int64_t)diff), false); ASSERT(!error); if (error) goto done; @@ -2265,9 +2235,8 @@ xfs_bmap_add_extent_delay_real( temp += bma->cur->bc_private.b.allocated; ASSERT(temp <= da_old); if (temp < da_old) - xfs_icsb_modify_counters(bma->ip->i_mount, - XFS_SBS_FDBLOCKS, - (int64_t)(da_old - temp), 0); + xfs_mod_fdblocks(bma->ip->i_mount, + (int64_t)(da_old - temp), false); } /* clear out the allocated field, done with it now in any case. */ @@ -2309,6 +2278,7 @@ xfs_bmap_add_extent_unwritten_real( /* left is 0, right is 1, prev is 2 */ int rval=0; /* return value (logging flags) */ int state = 0;/* state bits, accessed thru macros */ + struct xfs_mount *mp = tp->t_mountp; *logflagsp = 0; @@ -2421,19 +2391,19 @@ xfs_bmap_add_extent_unwritten_real( RIGHT.br_startblock, RIGHT.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + PREV.br_blockcount + @@ -2464,13 +2434,13 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_startblock, PREV.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + PREV.br_blockcount, @@ -2499,13 +2469,13 @@ xfs_bmap_add_extent_unwritten_real( RIGHT.br_startblock, RIGHT.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, @@ -2532,7 +2502,7 @@ xfs_bmap_add_extent_unwritten_real( new->br_startblock, new->br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, new->br_startoff, new->br_startblock, new->br_blockcount, newext))) @@ -2569,7 +2539,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_startblock, PREV.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff + new->br_blockcount, 
PREV.br_startblock + new->br_blockcount, @@ -2611,7 +2581,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_startblock, PREV.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff + new->br_blockcount, PREV.br_startblock + new->br_blockcount, @@ -2621,7 +2591,7 @@ xfs_bmap_add_extent_unwritten_real( cur->bc_rec.b = *new; if ((error = xfs_btree_insert(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } break; @@ -2651,7 +2621,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_startblock, PREV.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount - new->br_blockcount, @@ -2689,7 +2659,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_startblock, PREV.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount - new->br_blockcount, @@ -2699,11 +2669,11 @@ xfs_bmap_add_extent_unwritten_real( new->br_startblock, new->br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 0, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); cur->bc_rec.b.br_state = XFS_EXT_NORM; if ((error = xfs_btree_insert(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } break; @@ -2737,7 +2707,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_startblock, PREV.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); /* new right extent - oldext */ if ((error = xfs_bmbt_update(cur, r[1].br_startoff, r[1].br_startblock, r[1].br_blockcount, @@ -2749,7 +2719,7 @@ xfs_bmap_add_extent_unwritten_real( new->br_startoff - PREV.br_startoff; if ((error = xfs_btree_insert(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); /* * Reset the cursor to the position of the new extent * we are about to insert as we can't trust it after @@ -2759,12 +2729,12 @@ xfs_bmap_add_extent_unwritten_real( new->br_startblock, new->br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 0, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); /* new middle extent - newext */ cur->bc_rec.b.br_state = new->br_state; if ((error = xfs_btree_insert(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } break; @@ -2944,8 +2914,8 @@ xfs_bmap_add_extent_hole_delay( } if (oldlen != newlen) { ASSERT(oldlen > newlen); - xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, - (int64_t)(oldlen - newlen), 0); + xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), + false); /* * Nothing to do for disk quota accounting here. */ @@ -2968,7 +2938,9 @@ xfs_bmap_add_extent_hole_real( xfs_bmbt_irec_t right; /* right neighbor extent entry */ int rval=0; /* return value (logging flags) */ int state; /* state bits, accessed thru macros */ + struct xfs_mount *mp; + mp = bma->tp ? 
bma->tp->t_mountp : NULL; ifp = XFS_IFORK_PTR(bma->ip, whichfork); ASSERT(bma->idx >= 0); @@ -3056,15 +3028,15 @@ xfs_bmap_add_extent_hole_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_btree_delete(bma->cur, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_btree_decrement(bma->cur, 0, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, left.br_startoff, left.br_startblock, left.br_blockcount + @@ -3097,7 +3069,7 @@ xfs_bmap_add_extent_hole_real( &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, left.br_startoff, left.br_startblock, left.br_blockcount + @@ -3131,7 +3103,7 @@ xfs_bmap_add_extent_hole_real( right.br_blockcount, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); error = xfs_bmbt_update(bma->cur, new->br_startoff, new->br_startblock, new->br_blockcount + @@ -3161,12 +3133,12 @@ xfs_bmap_add_extent_hole_real( new->br_blockcount, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 0, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); bma->cur->bc_rec.b.br_state = new->br_state; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } break; } @@ -4160,18 +4132,15 @@ xfs_bmapi_reserve_delalloc( ASSERT(indlen > 0); if (rt) { - error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - -((int64_t)extsz), 0); + error = xfs_mod_frextents(mp, -((int64_t)extsz)); } else { - error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, - -((int64_t)alen), 0); + error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); } if (error) goto out_unreserve_quota; - error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, - -((int64_t)indlen), 0); + error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); if (error) goto out_unreserve_blocks; @@ -4198,9 +4167,9 @@ xfs_bmapi_reserve_delalloc( out_unreserve_blocks: if (rt) - xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0); + xfs_mod_frextents(mp, extsz); else - xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0); + xfs_mod_fdblocks(mp, alen, false); out_unreserve_quota: if (XFS_IS_QUOTA_ON(mp)) xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ? @@ -4801,7 +4770,7 @@ xfs_bmap_del_extent( got.br_startblock, got.br_blockcount, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } da_old = da_new = 0; } else { @@ -4835,7 +4804,7 @@ xfs_bmap_del_extent( } if ((error = xfs_btree_delete(cur, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); break; case 2: @@ -4935,7 +4904,8 @@ xfs_bmap_del_extent( got.br_startblock, temp, &i))) goto done; - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, + i == 1, done); /* * Update the btree record back * to the original value. @@ -4956,7 +4926,7 @@ xfs_bmap_del_extent( error = -ENOSPC; goto done; } - XFS_WANT_CORRUPTED_GOTO(i == 1, done); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } else flags |= xfs_ilog_fext(whichfork); XFS_IFORK_NEXT_SET(ip, whichfork, @@ -5012,10 +4982,8 @@ xfs_bmap_del_extent( * Nothing to do for disk quota accounting here. 
*/ ASSERT(da_old >= da_new); - if (da_old > da_new) { - xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, - (int64_t)(da_old - da_new), 0); - } + if (da_old > da_new) + xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false); done: *logflagsp = flags; return error; @@ -5284,14 +5252,13 @@ xfs_bunmapi( rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); do_div(rtexts, mp->m_sb.sb_rextsize); - xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - (int64_t)rtexts, 0); + xfs_mod_frextents(mp, (int64_t)rtexts); (void)xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del.br_blockcount), 0, XFS_QMOPT_RES_RTBLKS); } else { - xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, - (int64_t)del.br_blockcount, 0); + xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, + false); (void)xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del.br_blockcount), 0, XFS_QMOPT_RES_REGBLKS); @@ -5453,6 +5420,7 @@ xfs_bmse_merge( struct xfs_bmbt_irec left; xfs_filblks_t blockcount; int error, i; + struct xfs_mount *mp = ip->i_mount; xfs_bmbt_get_all(gotp, &got); xfs_bmbt_get_all(leftp, &left); @@ -5487,19 +5455,19 @@ xfs_bmse_merge( got.br_blockcount, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); error = xfs_btree_delete(cur, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); /* lookup and update size of the previous extent */ error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock, left.br_blockcount, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); left.br_blockcount = blockcount; @@ -5518,50 +5486,92 @@ xfs_bmse_shift_one( int *current_ext, struct xfs_bmbt_rec_host *gotp, struct xfs_btree_cur *cur, - int *logflags) + int *logflags, + enum shift_direction direction) { struct xfs_ifork *ifp; + struct xfs_mount *mp; xfs_fileoff_t startoff; - struct xfs_bmbt_rec_host *leftp; + struct xfs_bmbt_rec_host *adj_irecp; struct xfs_bmbt_irec got; - struct xfs_bmbt_irec left; + struct xfs_bmbt_irec adj_irec; int error; int i; + int total_extents; + mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); + total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); xfs_bmbt_get_all(gotp, &got); - startoff = got.br_startoff - offset_shift_fsb; /* delalloc extents should be prevented by caller */ - XFS_WANT_CORRUPTED_RETURN(!isnullstartblock(got.br_startblock)); + XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock)); - /* - * Check for merge if we've got an extent to the left, otherwise make - * sure there's enough room at the start of the file for the shift. - */ - if (*current_ext) { - /* grab the left extent and check for a large enough hole */ - leftp = xfs_iext_get_ext(ifp, *current_ext - 1); - xfs_bmbt_get_all(leftp, &left); + if (direction == SHIFT_LEFT) { + startoff = got.br_startoff - offset_shift_fsb; + + /* + * Check for merge if we've got an extent to the left, + * otherwise make sure there's enough room at the start + * of the file for the shift. + */ + if (!*current_ext) { + if (got.br_startoff < offset_shift_fsb) + return -EINVAL; + goto update_current_ext; + } + /* + * grab the left extent and check for a large + * enough hole. 
+ */ + adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1); + xfs_bmbt_get_all(adj_irecp, &adj_irec); - if (startoff < left.br_startoff + left.br_blockcount) + if (startoff < + adj_irec.br_startoff + adj_irec.br_blockcount) return -EINVAL; /* check whether to merge the extent or shift it down */ - if (xfs_bmse_can_merge(&left, &got, offset_shift_fsb)) { + if (xfs_bmse_can_merge(&adj_irec, &got, + offset_shift_fsb)) { return xfs_bmse_merge(ip, whichfork, offset_shift_fsb, - *current_ext, gotp, leftp, cur, - logflags); + *current_ext, gotp, adj_irecp, + cur, logflags); } - } else if (got.br_startoff < offset_shift_fsb) - return -EINVAL; - + } else { + startoff = got.br_startoff + offset_shift_fsb; + /* nothing to move if this is the last extent */ + if (*current_ext >= (total_extents - 1)) + goto update_current_ext; + /* + * If this is not the last extent in the file, make sure there + * is enough room between current extent and next extent for + * accommodating the shift. + */ + adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1); + xfs_bmbt_get_all(adj_irecp, &adj_irec); + if (startoff + got.br_blockcount > adj_irec.br_startoff) + return -EINVAL; + /* + * Unlike a left shift (which involves a hole punch), + * a right shift does not modify extent neighbors + * in any way. We should never find mergeable extents + * in this scenario. Check anyways and warn if we + * encounter two extents that could be one. + */ + if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb)) + WARN_ON_ONCE(1); + } /* * Increment the extent index for the next iteration, update the start * offset of the in-core extent and update the btree if applicable. */ - (*current_ext)++; +update_current_ext: + if (direction == SHIFT_LEFT) + (*current_ext)++; + else + (*current_ext)--; xfs_bmbt_set_startoff(gotp, startoff); *logflags |= XFS_ILOG_CORE; if (!cur) { @@ -5573,18 +5583,18 @@ xfs_bmse_shift_one( got.br_blockcount, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(mp, i == 1); got.br_startoff = startoff; return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, - got.br_blockcount, got.br_state); + got.br_blockcount, got.br_state); } /* - * Shift extent records to the left to cover a hole. + * Shift extent records to the left/right to cover/create a hole. * * The maximum number of extents to be shifted in a single operation is - * @num_exts. @start_fsb specifies the file offset to start the shift and the + * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb * is the length by which each extent is shifted. 
If there is no hole to shift * the extents into, this will be considered an invalid operation and we abort @@ -5594,12 +5604,13 @@ int xfs_bmap_shift_extents( struct xfs_trans *tp, struct xfs_inode *ip, - xfs_fileoff_t start_fsb, + xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, int *done, - xfs_fileoff_t *next_fsb, + xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock, struct xfs_bmap_free *flist, + enum shift_direction direction, int num_exts) { struct xfs_btree_cur *cur = NULL; @@ -5609,10 +5620,11 @@ xfs_bmap_shift_extents( struct xfs_ifork *ifp; xfs_extnum_t nexts = 0; xfs_extnum_t current_ext; + xfs_extnum_t total_extents; + xfs_extnum_t stop_extent; int error = 0; int whichfork = XFS_DATA_FORK; int logflags = 0; - int total_extents; if (unlikely(XFS_TEST_ERROR( (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && @@ -5628,6 +5640,8 @@ xfs_bmap_shift_extents( ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT); + ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT); ifp = XFS_IFORK_PTR(ip, whichfork); if (!(ifp->if_flags & XFS_IFEXTENTS)) { @@ -5645,43 +5659,83 @@ xfs_bmap_shift_extents( } /* + * There may be delalloc extents in the data fork before the range we + * are collapsing out, so we cannot use the count of real extents here. + * Instead we have to calculate it from the incore fork. + */ + total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); + if (total_extents == 0) { + *done = 1; + goto del_cursor; + } + + /* + * In case of first right shift, we need to initialize next_fsb + */ + if (*next_fsb == NULLFSBLOCK) { + gotp = xfs_iext_get_ext(ifp, total_extents - 1); + xfs_bmbt_get_all(gotp, &got); + *next_fsb = got.br_startoff; + if (stop_fsb > *next_fsb) { + *done = 1; + goto del_cursor; + } + } + + /* Lookup the extent index at which we have to stop */ + if (direction == SHIFT_RIGHT) { + gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent); + /* Make stop_extent exclusive of shift range */ + stop_extent--; + } else + stop_extent = total_extents; + + /* * Look up the extent index for the fsb where we start shifting. We can * henceforth iterate with current_ext as extent list changes are locked * out via ilock. * * gotp can be null in 2 cases: 1) if there are no extents or 2) - * start_fsb lies in a hole beyond which there are no extents. Either + * *next_fsb lies in a hole beyond which there are no extents. Either * way, we are done. */ - gotp = xfs_iext_bno_to_ext(ifp, start_fsb, &current_ext); + gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext); if (!gotp) { *done = 1; goto del_cursor; } - /* - * There may be delalloc extents in the data fork before the range we - * are collapsing out, so we cannot use the count of real extents here. - * Instead we have to calculate it from the incore fork. - */ - total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); - while (nexts++ < num_exts && current_ext < total_extents) { + /* some sanity checking before we finally start shifting extents */ + if ((direction == SHIFT_LEFT && current_ext >= stop_extent) || + (direction == SHIFT_RIGHT && current_ext <= stop_extent)) { + error = -EIO; + goto del_cursor; + } + + while (nexts++ < num_exts) { error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb, - &current_ext, gotp, cur, &logflags); + &current_ext, gotp, cur, &logflags, + direction); if (error) goto del_cursor; + /* + * If there was an extent merge during the shift, the extent + * count can change.
Update the total and grab the next record. + */ + if (direction == SHIFT_LEFT) { + total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); + stop_extent = total_extents; + } - /* update total extent count and grab the next record */ - total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); - if (current_ext >= total_extents) + if (current_ext == stop_extent) { + *done = 1; + *next_fsb = NULLFSBLOCK; break; + } gotp = xfs_iext_get_ext(ifp, current_ext); } - /* Check if we are done */ - if (current_ext == total_extents) { - *done = 1; - } else if (next_fsb) { + if (!*done) { xfs_bmbt_get_all(gotp, &got); *next_fsb = got.br_startoff; } @@ -5696,3 +5750,189 @@ del_cursor: return error; } + +/* + * Splits an extent into two extents at split_fsb block such that it is + * the first block of the current_ext. @current_ext is the target extent + * to be split. @split_fsb is the block where the extent is split. + * If split_fsb lies in a hole or at the first block of an extent, just return 0. + */ +STATIC int +xfs_bmap_split_extent_at( + struct xfs_trans *tp, + struct xfs_inode *ip, + xfs_fileoff_t split_fsb, + xfs_fsblock_t *firstfsb, + struct xfs_bmap_free *free_list) +{ + int whichfork = XFS_DATA_FORK; + struct xfs_btree_cur *cur = NULL; + struct xfs_bmbt_rec_host *gotp; + struct xfs_bmbt_irec got; + struct xfs_bmbt_irec new; /* split extent */ + struct xfs_mount *mp = ip->i_mount; + struct xfs_ifork *ifp; + xfs_fsblock_t gotblkcnt; /* new block count for got */ + xfs_extnum_t current_ext; + int error = 0; + int logflags = 0; + int i = 0; + + if (unlikely(XFS_TEST_ERROR( + (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), + mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { + XFS_ERROR_REPORT("xfs_bmap_split_extent_at", + XFS_ERRLEVEL_LOW, mp); + return -EFSCORRUPTED; + } + + if (XFS_FORCED_SHUTDOWN(mp)) + return -EIO; + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS)) { + /* Read in all the extents */ + error = xfs_iread_extents(tp, ip, whichfork); + if (error) + return error; + } + + /* + * gotp can be null in 2 cases: 1) if there are no extents + * or 2) split_fsb lies in a hole beyond which there are + * no extents. Either way, we are done. + */ + gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext); + if (!gotp) + return 0; + + xfs_bmbt_get_all(gotp, &got); + + /* + * Check whether split_fsb lies in a hole or at the start boundary offset + * of the extent.
+ */ + if (got.br_startoff >= split_fsb) + return 0; + + gotblkcnt = split_fsb - got.br_startoff; + new.br_startoff = split_fsb; + new.br_startblock = got.br_startblock + gotblkcnt; + new.br_blockcount = got.br_blockcount - gotblkcnt; + new.br_state = got.br_state; + + if (ifp->if_flags & XFS_IFBROOT) { + cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); + cur->bc_private.b.firstblock = *firstfsb; + cur->bc_private.b.flist = free_list; + cur->bc_private.b.flags = 0; + error = xfs_bmbt_lookup_eq(cur, got.br_startoff, + got.br_startblock, + got.br_blockcount, + &i); + if (error) + goto del_cursor; + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); + } + + xfs_bmbt_set_blockcount(gotp, gotblkcnt); + got.br_blockcount = gotblkcnt; + + logflags = XFS_ILOG_CORE; + if (cur) { + error = xfs_bmbt_update(cur, got.br_startoff, + got.br_startblock, + got.br_blockcount, + got.br_state); + if (error) + goto del_cursor; + } else + logflags |= XFS_ILOG_DEXT; + + /* Add new extent */ + current_ext++; + xfs_iext_insert(ip, current_ext, 1, &new, 0); + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + + if (cur) { + error = xfs_bmbt_lookup_eq(cur, new.br_startoff, + new.br_startblock, new.br_blockcount, + &i); + if (error) + goto del_cursor; + XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor); + cur->bc_rec.b.br_state = new.br_state; + + error = xfs_btree_insert(cur, &i); + if (error) + goto del_cursor; + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); + } + + /* + * Convert to a btree if necessary. + */ + if (xfs_bmap_needs_btree(ip, whichfork)) { + int tmp_logflags; /* partial log flag return val */ + + ASSERT(cur == NULL); + error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list, + &cur, 0, &tmp_logflags, whichfork); + logflags |= tmp_logflags; + } + +del_cursor: + if (cur) { + cur->bc_private.b.allocated = 0; + xfs_btree_del_cursor(cur, + error ? 
XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + } + + if (logflags) + xfs_trans_log_inode(tp, ip, logflags); + return error; +} + +int +xfs_bmap_split_extent( + struct xfs_inode *ip, + xfs_fileoff_t split_fsb) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + struct xfs_bmap_free free_list; + xfs_fsblock_t firstfsb; + int committed; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); + error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, + XFS_DIOSTRAT_SPACE_RES(mp, 0), 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + + xfs_bmap_init(&free_list, &firstfsb); + + error = xfs_bmap_split_extent_at(tp, ip, split_fsb, + &firstfsb, &free_list); + if (error) + goto out; + + error = xfs_bmap_finish(&tp, &free_list, &committed); + if (error) + goto out; + + return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); + + +out: + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + return error; +} diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index b9d8a49..6aaa0c1 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -166,6 +166,11 @@ static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp) */ #define XFS_BMAP_MAX_SHIFT_EXTENTS 1 +enum shift_direction { + SHIFT_LEFT = 0, + SHIFT_RIGHT, +}; + #ifdef DEBUG void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt, int whichfork, unsigned long caller_ip); @@ -211,8 +216,10 @@ int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx, xfs_extnum_t num); uint xfs_default_attroffset(struct xfs_inode *ip); int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip, - xfs_fileoff_t start_fsb, xfs_fileoff_t offset_shift_fsb, - int *done, xfs_fileoff_t *next_fsb, xfs_fsblock_t *firstblock, - struct xfs_bmap_free *flist, int num_exts); + xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, + int *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock, + struct xfs_bmap_free *flist, enum shift_direction direction, + int num_exts); +int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset); #endif /* __XFS_BMAP_H__ */ diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 81cad43..c72283d 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -168,7 +168,7 @@ xfs_btree_check_lptr( xfs_fsblock_t bno, /* btree block disk address */ int level) /* btree block level */ { - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, level > 0 && bno != NULLFSBLOCK && XFS_FSB_SANITY_CHECK(cur->bc_mp, bno)); @@ -187,7 +187,7 @@ xfs_btree_check_sptr( { xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks; - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, level > 0 && bno != NULLAGBLOCK && bno != 0 && @@ -1825,7 +1825,7 @@ xfs_btree_lookup( error = xfs_btree_increment(cur, 0, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); *stat = 1; return 0; @@ -2285,7 +2285,7 @@ xfs_btree_rshift( if (error) goto error0; i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); error = xfs_btree_increment(tcur, level, &i); if (error) @@ -3138,7 +3138,7 @@ xfs_btree_insert( goto error0; } - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); level++; /* @@ -3582,15 
+3582,15 @@ xfs_btree_delrec( * Actually any entry but the first would suffice. */ i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); error = xfs_btree_increment(tcur, level, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); /* Grab a pointer to the block. */ right = xfs_btree_get_block(tcur, level, &rbp); @@ -3634,12 +3634,12 @@ xfs_btree_delrec( rrecs = xfs_btree_get_numrecs(right); if (!xfs_btree_ptr_is_null(cur, &lptr)) { i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); error = xfs_btree_decrement(tcur, level, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); } } @@ -3653,13 +3653,13 @@ xfs_btree_delrec( * previous block. */ i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); error = xfs_btree_decrement(tcur, level, &i); if (error) goto error0; i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); /* Grab a pointer to the block. */ left = xfs_btree_get_block(tcur, level, &lbp); diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c index 9cb0115..2385f8c 100644 --- a/fs/xfs/libxfs/xfs_da_btree.c +++ b/fs/xfs/libxfs/xfs_da_btree.c @@ -538,12 +538,12 @@ xfs_da3_root_split( oldroot = blk1->bp->b_addr; if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) { - struct xfs_da3_icnode_hdr nodehdr; + struct xfs_da3_icnode_hdr icnodehdr; - dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot); + dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot); btree = dp->d_ops->node_tree_p(oldroot); - size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot); - level = nodehdr.level; + size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot); + level = icnodehdr.level; /* * we are about to copy oldroot to bp, so set up the type diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h index 0a49b02..74bcbab 100644 --- a/fs/xfs/libxfs/xfs_da_format.h +++ b/fs/xfs/libxfs/xfs_da_format.h @@ -725,7 +725,13 @@ struct xfs_attr3_icleaf_hdr { __uint16_t magic; __uint16_t count; __uint16_t usedbytes; - __uint16_t firstused; + /* + * firstused is 32-bit here instead of 16-bit like the on-disk variant + * to support maximum fsb size of 64k without overflow issues throughout + * the attr code. Instead, the overflow condition is handled on + * conversion to/from disk. + */ + __uint32_t firstused; __u8 holes; struct { __uint16_t base; @@ -734,6 +740,12 @@ struct xfs_attr3_icleaf_hdr { }; /* + * Special value to represent fs block size in the leaf header firstused field. + * Only used when block size overflows the 2-bytes available on disk. + */ +#define XFS_ATTR3_LEAF_NULLOFF 0 + +/* * Flags used in the leaf_entry[i].flags field. * NOTE: the INCOMPLETE bit must not collide with the flags bits specified * on the system call, they are "or"ed together for various operations. 
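The widened in-core firstused field described above can only overflow the 16-bit on-disk field in one case: a completely empty attr leaf in a 64k filesystem block, where firstused equals the block size. A minimal sketch of the to-disk encoding that the comment alludes to; the helper name is illustrative only, and the real conversion helpers are not part of the hunks shown here:

static inline __uint16_t
attr3_leaf_firstused_to_disk_sketch(__uint32_t firstused, __uint32_t blocksize)
{
	/*
	 * Only an empty 64k block exceeds the 16 bits available on disk
	 * (firstused == blocksize == 65536); encode that case with the
	 * XFS_ATTR3_LEAF_NULLOFF sentinel defined above.
	 */
	if (firstused > 0xffff) {
		ASSERT(firstused == blocksize);
		return XFS_ATTR3_LEAF_NULLOFF;
	}
	return (__uint16_t)firstused;
}

The matching from-disk conversion would translate XFS_ATTR3_LEAF_NULLOFF back into the filesystem block size when filling in the in-core header.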
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c index 5ff31be..de1ea16 100644 --- a/fs/xfs/libxfs/xfs_dir2_data.c +++ b/fs/xfs/libxfs/xfs_dir2_data.c @@ -89,7 +89,7 @@ __xfs_dir3_data_check( * so just ensure that the count falls somewhere inside the * block right now. */ - XFS_WANT_CORRUPTED_RETURN(be32_to_cpu(btp->count) < + XFS_WANT_CORRUPTED_RETURN(mp, be32_to_cpu(btp->count) < ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry)); break; case cpu_to_be32(XFS_DIR3_DATA_MAGIC): @@ -107,21 +107,21 @@ __xfs_dir3_data_check( bf = ops->data_bestfree_p(hdr); count = lastfree = freeseen = 0; if (!bf[0].length) { - XFS_WANT_CORRUPTED_RETURN(!bf[0].offset); + XFS_WANT_CORRUPTED_RETURN(mp, !bf[0].offset); freeseen |= 1 << 0; } if (!bf[1].length) { - XFS_WANT_CORRUPTED_RETURN(!bf[1].offset); + XFS_WANT_CORRUPTED_RETURN(mp, !bf[1].offset); freeseen |= 1 << 1; } if (!bf[2].length) { - XFS_WANT_CORRUPTED_RETURN(!bf[2].offset); + XFS_WANT_CORRUPTED_RETURN(mp, !bf[2].offset); freeseen |= 1 << 2; } - XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[0].length) >= + XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[0].length) >= be16_to_cpu(bf[1].length)); - XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[1].length) >= + XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[1].length) >= be16_to_cpu(bf[2].length)); /* * Loop over the data/unused entries. @@ -134,18 +134,18 @@ __xfs_dir3_data_check( * doesn't need to be there. */ if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - XFS_WANT_CORRUPTED_RETURN(lastfree == 0); - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, lastfree == 0); + XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) == (char *)dup - (char *)hdr); dfp = xfs_dir2_data_freefind(hdr, bf, dup); if (dfp) { i = (int)(dfp - bf); - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, (freeseen & (1 << i)) == 0); freeseen |= 1 << i; } else { - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(dup->length) <= be16_to_cpu(bf[2].length)); } @@ -160,13 +160,13 @@ __xfs_dir3_data_check( * The linear search is crude but this is DEBUG code. */ dep = (xfs_dir2_data_entry_t *)p; - XFS_WANT_CORRUPTED_RETURN(dep->namelen != 0); - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, dep->namelen != 0); + XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber))); - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(*ops->data_entry_tag_p(dep)) == (char *)dep - (char *)hdr); - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX); count++; lastfree = 0; @@ -183,14 +183,15 @@ __xfs_dir3_data_check( be32_to_cpu(lep[i].hashval) == hash) break; } - XFS_WANT_CORRUPTED_RETURN(i < be32_to_cpu(btp->count)); + XFS_WANT_CORRUPTED_RETURN(mp, + i < be32_to_cpu(btp->count)); } p += ops->data_entsize(dep->namelen); } /* * Need to have seen all the entries and all the bestfree slots. 
*/ - XFS_WANT_CORRUPTED_RETURN(freeseen == 7); + XFS_WANT_CORRUPTED_RETURN(mp, freeseen == 7); if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) { for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { @@ -198,13 +199,13 @@ __xfs_dir3_data_check( cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; if (i > 0) - XFS_WANT_CORRUPTED_RETURN( + XFS_WANT_CORRUPTED_RETURN(mp, be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval)); } - XFS_WANT_CORRUPTED_RETURN(count == + XFS_WANT_CORRUPTED_RETURN(mp, count == be32_to_cpu(btp->count) - be32_to_cpu(btp->stale)); - XFS_WANT_CORRUPTED_RETURN(stale == be32_to_cpu(btp->stale)); + XFS_WANT_CORRUPTED_RETURN(mp, stale == be32_to_cpu(btp->stale)); } return 0; } diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 8eb7189..4daaa66 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -264,68 +264,6 @@ typedef struct xfs_dsb { /* must be padded to 64 bit alignment */ } xfs_dsb_t; -/* - * Sequence number values for the fields. - */ -typedef enum { - XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS, - XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO, - XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS, - XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS, - XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE, - XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG, - XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG, - XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT, - XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO, - XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN, - XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG, - XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT, - XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT, - XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT, - XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD, - XFS_SBS_PQUOTINO, XFS_SBS_LSN, - XFS_SBS_FIELDCOUNT -} xfs_sb_field_t; - -/* - * Mask values, defined based on the xfs_sb_field_t values. - * Only define the ones we're using. 
- */ -#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x) -#define XFS_SB_UUID XFS_SB_MVAL(UUID) -#define XFS_SB_FNAME XFS_SB_MVAL(FNAME) -#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO) -#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO) -#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO) -#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM) -#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO) -#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO) -#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS) -#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN) -#define XFS_SB_UNIT XFS_SB_MVAL(UNIT) -#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH) -#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT) -#define XFS_SB_IFREE XFS_SB_MVAL(IFREE) -#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS) -#define XFS_SB_FEATURES2 (XFS_SB_MVAL(FEATURES2) | \ - XFS_SB_MVAL(BAD_FEATURES2)) -#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT) -#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT) -#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT) -#define XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT) -#define XFS_SB_CRC XFS_SB_MVAL(CRC) -#define XFS_SB_PQUOTINO XFS_SB_MVAL(PQUOTINO) -#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT) -#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1) -#define XFS_SB_MOD_BITS \ - (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \ - XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \ - XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \ - XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \ - XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \ - XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \ - XFS_SB_PQUOTINO) - /* * Misc. Flags - warning - these will be cleared by xfs_repair unless diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 116ef1d..07349a1 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -376,7 +376,8 @@ xfs_ialloc_ag_alloc( */ newlen = args.mp->m_ialloc_inos; if (args.mp->m_maxicount && - args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) + percpu_counter_read(&args.mp->m_icount) + newlen > + args.mp->m_maxicount) return -ENOSPC; args.minlen = args.maxlen = args.mp->m_ialloc_blks; /* @@ -700,7 +701,7 @@ xfs_ialloc_next_rec( error = xfs_inobt_get_rec(cur, rec, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); } return 0; @@ -724,7 +725,7 @@ xfs_ialloc_get_rec( error = xfs_inobt_get_rec(cur, rec, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); } return 0; @@ -783,12 +784,12 @@ xfs_dialloc_ag_inobt( error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); error = xfs_inobt_get_rec(cur, &rec, &j); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(j == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0); if (rec.ir_freecount > 0) { /* @@ -944,19 +945,19 @@ newino: error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); for (;;) { error = xfs_inobt_get_rec(cur, &rec, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); if (rec.ir_freecount > 0) break; error = xfs_btree_increment(cur, 0, &i); if (error) goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, 
error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); } alloc_inode: @@ -1016,7 +1017,7 @@ xfs_dialloc_ag_finobt_near( error = xfs_inobt_get_rec(lcur, rec, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1); /* * See if we've landed in the parent inode record. The finobt @@ -1039,10 +1040,10 @@ xfs_dialloc_ag_finobt_near( error = xfs_inobt_get_rec(rcur, &rrec, &j); if (error) goto error_rcur; - XFS_WANT_CORRUPTED_GOTO(j == 1, error_rcur); + XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur); } - XFS_WANT_CORRUPTED_GOTO(i == 1 || j == 1, error_rcur); + XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur); if (i == 1 && j == 1) { /* * Both the left and right records are valid. Choose the closer @@ -1095,7 +1096,7 @@ xfs_dialloc_ag_finobt_newino( error = xfs_inobt_get_rec(cur, rec, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); return 0; } } @@ -1106,12 +1107,12 @@ xfs_dialloc_ag_finobt_newino( error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); error = xfs_inobt_get_rec(cur, rec, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); return 0; } @@ -1133,19 +1134,19 @@ xfs_dialloc_ag_update_inobt( error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); error = xfs_inobt_get_rec(cur, &rec, &i); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % XFS_INODES_PER_CHUNK) == 0); rec.ir_free &= ~XFS_INOBT_MASK(offset); rec.ir_freecount--; - XFS_WANT_CORRUPTED_RETURN((rec.ir_free == frec->ir_free) && + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) && (rec.ir_freecount == frec->ir_freecount)); return xfs_inobt_update(cur, &rec); @@ -1340,7 +1341,8 @@ xfs_dialloc( * inode. */ if (mp->m_maxicount && - mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) { + percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos > + mp->m_maxicount) { noroom = 1; okalloc = 0; } @@ -1475,14 +1477,14 @@ xfs_difree_inobt( __func__, error); goto error0; } - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); error = xfs_inobt_get_rec(cur, &rec, &i); if (error) { xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", __func__, error); goto error0; } - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); /* * Get the offset in the inode chunk. */ @@ -1592,7 +1594,7 @@ xfs_difree_finobt( * freed an inode in a previously fully allocated chunk. If not, * something is out of sync. 
*/ - XFS_WANT_CORRUPTED_GOTO(ibtrec->ir_freecount == 1, error); + XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error); error = xfs_inobt_insert_rec(cur, ibtrec->ir_freecount, ibtrec->ir_free, &i); @@ -1613,12 +1615,12 @@ xfs_difree_finobt( error = xfs_inobt_get_rec(cur, &rec, &i); if (error) goto error; - XFS_WANT_CORRUPTED_GOTO(i == 1, error); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error); rec.ir_free |= XFS_INOBT_MASK(offset); rec.ir_freecount++; - XFS_WANT_CORRUPTED_GOTO((rec.ir_free == ibtrec->ir_free) && + XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) && (rec.ir_freecount == ibtrec->ir_freecount), error); diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index b0a5fe9..dc4bfc5 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -111,14 +111,6 @@ xfs_mount_validate_sb( bool check_inprogress, bool check_version) { - - /* - * If the log device and data device have the - * same device number, the log is internal. - * Consequently, the sb_logstart should be non-zero. If - * we have a zero sb_logstart in this case, we may be trying to mount - * a volume filesystem in a non-volume manner. - */ if (sbp->sb_magicnum != XFS_SB_MAGIC) { xfs_warn(mp, "bad magic number"); return -EWRONGFS; @@ -743,17 +735,15 @@ xfs_initialize_perag_data( btree += pag->pagf_btreeblks; xfs_perag_put(pag); } - /* - * Overwrite incore superblock counters with just-read data - */ + + /* Overwrite incore superblock counters with just-read data */ spin_lock(&mp->m_sb_lock); sbp->sb_ifree = ifree; sbp->sb_icount = ialloc; sbp->sb_fdblocks = bfree + bfreelst + btree; spin_unlock(&mp->m_sb_lock); - /* Fixup the per-cpu counters as well. */ - xfs_icsb_reinit_counters(mp); + xfs_reinit_percpu_counters(mp); return 0; } @@ -771,6 +761,10 @@ xfs_log_sb( struct xfs_mount *mp = tp->t_mountp; struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0); + mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); + mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree); + mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); + xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb)); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 3a9b7a1..a56960d 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -31,7 +31,6 @@ #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_bmap_btree.h" -#include <linux/aio.h> #include <linux/gfp.h> #include <linux/mpage.h> #include <linux/pagevec.h> @@ -1233,6 +1232,117 @@ xfs_vm_releasepage( return try_to_free_buffers(page); } +/* + * When we map a DIO buffer, we may need to attach an ioend that describes the + * type of write IO we are doing. This passes to the completion function the + * operations it needs to perform. If the mapping is for an overwrite wholly + * within the EOF then we don't need an ioend and so we don't allocate one. + * This avoids the unnecessary overhead of allocating and freeing ioends for + * workloads that don't require transactions on IO completion. + * + * If we get multiple mappings in a single IO, we might be mapping different + * types. 
But because the direct IO can only have a single private pointer, we + * need to ensure that: + * + * a) i) the ioend spans the entire region of unwritten mappings; or + * ii) the ioend spans all the mappings that cross or are beyond EOF; and + * b) if it contains unwritten extents, it is *permanently* marked as such + * + * We could do this by chaining ioends like buffered IO does, but we only + * actually get one IO completion callback from the direct IO, and that spans + * the entire IO regardless of how many mappings and IOs are needed to complete + * the DIO. There is only going to be one reference to the ioend and its life + * cycle is constrained by the DIO completion code. hence we don't need + * reference counting here. + */ +static void +xfs_map_direct( + struct inode *inode, + struct buffer_head *bh_result, + struct xfs_bmbt_irec *imap, + xfs_off_t offset) +{ + struct xfs_ioend *ioend; + xfs_off_t size = bh_result->b_size; + int type; + + if (ISUNWRITTEN(imap)) + type = XFS_IO_UNWRITTEN; + else + type = XFS_IO_OVERWRITE; + + trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap); + + if (bh_result->b_private) { + ioend = bh_result->b_private; + ASSERT(ioend->io_size > 0); + ASSERT(offset >= ioend->io_offset); + if (offset + size > ioend->io_offset + ioend->io_size) + ioend->io_size = offset - ioend->io_offset + size; + + if (type == XFS_IO_UNWRITTEN && type != ioend->io_type) + ioend->io_type = XFS_IO_UNWRITTEN; + + trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset, + ioend->io_size, ioend->io_type, + imap); + } else if (type == XFS_IO_UNWRITTEN || + offset + size > i_size_read(inode)) { + ioend = xfs_alloc_ioend(inode, type); + ioend->io_offset = offset; + ioend->io_size = size; + + bh_result->b_private = ioend; + set_buffer_defer_completion(bh_result); + + trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type, + imap); + } else { + trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type, + imap); + } +} + +/* + * If this is O_DIRECT or the mpage code calling tell them how large the mapping + * is, so that we can avoid repeated get_blocks calls. + * + * If the mapping spans EOF, then we have to break the mapping up as the mapping + * for blocks beyond EOF must be marked new so that sub block regions can be + * correctly zeroed. We can't do this for mappings within EOF unless the mapping + * was just allocated or is unwritten, otherwise the callers would overwrite + * existing data with zeros. Hence we have to split the mapping into a range up + * to and including EOF, and a second mapping for beyond EOF. 
+ */ +static void +xfs_map_trim_size( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + struct xfs_bmbt_irec *imap, + xfs_off_t offset, + ssize_t size) +{ + xfs_off_t mapping_size; + + mapping_size = imap->br_startoff + imap->br_blockcount - iblock; + mapping_size <<= inode->i_blkbits; + + ASSERT(mapping_size > 0); + if (mapping_size > size) + mapping_size = size; + if (offset < i_size_read(inode) && + offset + mapping_size >= i_size_read(inode)) { + /* limit mapping to block that spans EOF */ + mapping_size = roundup_64(i_size_read(inode) - offset, + 1 << inode->i_blkbits); + } + if (mapping_size > LONG_MAX) + mapping_size = LONG_MAX; + + bh_result->b_size = mapping_size; +} + STATIC int __xfs_get_blocks( struct inode *inode, @@ -1321,31 +1431,37 @@ __xfs_get_blocks( xfs_iunlock(ip, lockmode); } - - trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap); + trace_xfs_get_blocks_alloc(ip, offset, size, + ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN + : XFS_IO_DELALLOC, &imap); } else if (nimaps) { - trace_xfs_get_blocks_found(ip, offset, size, 0, &imap); + trace_xfs_get_blocks_found(ip, offset, size, + ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN + : XFS_IO_OVERWRITE, &imap); xfs_iunlock(ip, lockmode); } else { trace_xfs_get_blocks_notfound(ip, offset, size); goto out_unlock; } + /* trim mapping down to size requested */ + if (direct || size > (1 << inode->i_blkbits)) + xfs_map_trim_size(inode, iblock, bh_result, + &imap, offset, size); + + /* + * For unwritten extents do not report a disk address in the buffered + * read case (treat as if we're reading into a hole). + */ if (imap.br_startblock != HOLESTARTBLOCK && - imap.br_startblock != DELAYSTARTBLOCK) { - /* - * For unwritten extents do not report a disk address on - * the read case (treat as if we're reading into a hole). - */ - if (create || !ISUNWRITTEN(&imap)) - xfs_map_buffer(inode, bh_result, &imap, offset); - if (create && ISUNWRITTEN(&imap)) { - if (direct) { - bh_result->b_private = inode; - set_buffer_defer_completion(bh_result); - } + imap.br_startblock != DELAYSTARTBLOCK && + (create || !ISUNWRITTEN(&imap))) { + xfs_map_buffer(inode, bh_result, &imap, offset); + if (ISUNWRITTEN(&imap)) set_buffer_unwritten(bh_result); - } + /* direct IO needs special help */ + if (create && direct) + xfs_map_direct(inode, bh_result, &imap, offset); } /* @@ -1378,39 +1494,6 @@ __xfs_get_blocks( } } - /* - * If this is O_DIRECT or the mpage code calling tell them how large - * the mapping is, so that we can avoid repeated get_blocks calls. - * - * If the mapping spans EOF, then we have to break the mapping up as the - * mapping for blocks beyond EOF must be marked new so that sub block - * regions can be correctly zeroed. We can't do this for mappings within - * EOF unless the mapping was just allocated or is unwritten, otherwise - * the callers would overwrite existing data with zeros. Hence we have - * to split the mapping into a range up to and including EOF, and a - * second mapping for beyond EOF. 
- */ - if (direct || size > (1 << inode->i_blkbits)) { - xfs_off_t mapping_size; - - mapping_size = imap.br_startoff + imap.br_blockcount - iblock; - mapping_size <<= inode->i_blkbits; - - ASSERT(mapping_size > 0); - if (mapping_size > size) - mapping_size = size; - if (offset < i_size_read(inode) && - offset + mapping_size >= i_size_read(inode)) { - /* limit mapping to block that spans EOF */ - mapping_size = roundup_64(i_size_read(inode) - offset, - 1 << inode->i_blkbits); - } - if (mapping_size > LONG_MAX) - mapping_size = LONG_MAX; - - bh_result->b_size = mapping_size; - } - return 0; out_unlock: @@ -1441,9 +1524,11 @@ xfs_get_blocks_direct( /* * Complete a direct I/O write request. * - * If the private argument is non-NULL __xfs_get_blocks signals us that we - * need to issue a transaction to convert the range from unwritten to written - * extents. + * The ioend structure is passed from __xfs_get_blocks() to tell us what to do. + * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite + * wholly within the EOF and so there is nothing for us to do. Note that in this + * case the completion can be called in interrupt context, whereas if we have an + * ioend we will always be called in task context (i.e. from a workqueue). */ STATIC void xfs_end_io_direct_write( @@ -1455,48 +1540,75 @@ xfs_end_io_direct_write( struct inode *inode = file_inode(iocb->ki_filp); struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; + struct xfs_ioend *ioend = private; - if (XFS_FORCED_SHUTDOWN(mp)) + trace_xfs_gbmap_direct_endio(ip, offset, size, + ioend ? ioend->io_type : 0, NULL); + + if (!ioend) { + ASSERT(offset + size <= i_size_read(inode)); return; + } + + if (XFS_FORCED_SHUTDOWN(mp)) + goto out_end_io; /* - * While the generic direct I/O code updates the inode size, it does - * so only after the end_io handler is called, which means our - * end_io handler thinks the on-disk size is outside the in-core - * size. To prevent this just update it a little bit earlier here. + * dio completion end_io functions are only called on writes if more + * than 0 bytes was written. */ + ASSERT(size > 0); + + /* + * The ioend only maps whole blocks, while the IO may be sector aligned. + * Hence the ioend offset/size may not match the IO offset/size exactly. + * Because we don't map overwrites within EOF into the ioend, the offset + * may not match, but only if the endio spans EOF. Either way, write + * the IO sizes into the ioend so that completion processing does the + * right thing. + */ + ASSERT(offset + size <= ioend->io_offset + ioend->io_size); + ioend->io_size = size; + ioend->io_offset = offset; + + /* + * The ioend tells us whether we are doing unwritten extent conversion + * or an append transaction that updates the on-disk file size. These + * cases are the only cases where we should *potentially* be needing + * to update the VFS inode size. + * + * We need to update the in-core inode size here so that we don't end up + * with the on-disk inode size being outside the in-core inode size. We + * have no other method of updating EOF for AIO, so always do it here + * if necessary. + * + * We need to lock the test/set EOF update as we can be racing with + * other IO completions here to update the EOF. Failing to serialise + * here can result in EOF moving backwards and Bad Things Happen when + * that occurs. 
+ */ + spin_lock(&ip->i_flags_lock); if (offset + size > i_size_read(inode)) i_size_write(inode, offset + size); + spin_unlock(&ip->i_flags_lock); /* - * For direct I/O we do not know if we need to allocate blocks or not, - * so we can't preallocate an append transaction, as that results in - * nested reservations and log space deadlocks. Hence allocate the - * transaction here. While this is sub-optimal and can block IO - * completion for some time, we're stuck with doing it this way until - * we can pass the ioend to the direct IO allocation callbacks and - * avoid nesting that way. + * If we are doing an append IO that needs to update the EOF on disk, + * do the transaction reserve now so we can use common end io + * processing. Stashing the error (if there is one) in the ioend will + * result in the ioend processing passing on the error if it is + * possible as we can't return it from here. */ - if (private && size > 0) { - xfs_iomap_write_unwritten(ip, offset, size); - } else if (offset + size > ip->i_d.di_size) { - struct xfs_trans *tp; - int error; - - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return; - } + if (ioend->io_type == XFS_IO_OVERWRITE) + ioend->io_error = xfs_setfilesize_trans_alloc(ioend); - xfs_setfilesize(ip, tp, offset, size); - } +out_end_io: + xfs_end_io(&ioend->io_work); + return; } STATIC ssize_t xfs_vm_direct_IO( - int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset) @@ -1504,15 +1616,14 @@ xfs_vm_direct_IO( struct inode *inode = iocb->ki_filp->f_mapping->host; struct block_device *bdev = xfs_find_bdev_for_inode(inode); - if (rw & WRITE) { - return __blockdev_direct_IO(rw, iocb, inode, bdev, iter, - offset, xfs_get_blocks_direct, + if (iov_iter_rw(iter) == WRITE) { + return __blockdev_direct_IO(iocb, inode, bdev, iter, offset, + xfs_get_blocks_direct, xfs_end_io_direct_write, NULL, DIO_ASYNC_EXTEND); } - return __blockdev_direct_IO(rw, iocb, inode, bdev, iter, - offset, xfs_get_blocks_direct, - NULL, NULL, 0); + return __blockdev_direct_IO(iocb, inode, bdev, iter, offset, + xfs_get_blocks_direct, NULL, NULL, 0); } /* diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index 83af4c1..f9c1c64 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c @@ -132,9 +132,10 @@ xfs_attr3_leaf_inactive( int size; int tmp; int i; + struct xfs_mount *mp = bp->b_target->bt_mount; leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); /* * Count the number of "remote" value extents. 
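As a concrete illustration of the EOF trimming rule added by xfs_map_trim_size() in the fs/xfs/xfs_aops.c hunks above, here is a worked example; the numbers are illustrative only and assume 4096-byte blocks:

/*
 * Illustrative values: i_size = 10000 bytes, request offset = 8192, and
 * the found mapping extends 16384 bytes from the requested block.
 *
 *   offset < i_size                    (8192 < 10000)
 *   offset + mapping_size >= i_size    (8192 + 16384 >= 10000)
 *
 * so the mapping is limited to the block that spans EOF:
 *
 *   mapping_size = roundup_64(10000 - 8192, 4096) = 4096
 *
 * bh_result->b_size is set to a single block; the blocks beyond EOF are
 * returned by a later get_blocks call so they can be marked new and have
 * their sub-block regions zeroed correctly.
 */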
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c index a43d370..65fb37a 100644 --- a/fs/xfs/xfs_attr_list.c +++ b/fs/xfs/xfs_attr_list.c @@ -225,6 +225,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) int error, i; struct xfs_buf *bp; struct xfs_inode *dp = context->dp; + struct xfs_mount *mp = dp->i_mount; trace_xfs_attr_node_list(context); @@ -256,7 +257,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) case XFS_ATTR_LEAF_MAGIC: case XFS_ATTR3_LEAF_MAGIC: leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, + &leafhdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); if (cursor->hashval > be32_to_cpu( entries[leafhdr.count - 1].hashval)) { @@ -340,7 +342,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) xfs_trans_brelse(NULL, bp); return error; } - xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); if (context->seen_enough || leafhdr.forw == 0) break; cursor->blkno = leafhdr.forw; @@ -368,11 +370,12 @@ xfs_attr3_leaf_list_int( struct xfs_attr_leaf_entry *entry; int retval; int i; + struct xfs_mount *mp = context->dp->i_mount; trace_xfs_attr_list_leaf(context); leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); cursor = context->cursor; diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 22a5dcb..a52bbd3 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1376,22 +1376,19 @@ out: } /* - * xfs_collapse_file_space() - * This routine frees disk space and shift extent for the given file. - * The first thing we do is to free data blocks in the specified range - * by calling xfs_free_file_space(). It would also sync dirty data - * and invalidate page cache over the region on which collapse range - * is working. And Shift extent records to the left to cover a hole. - * RETURNS: - * 0 on success - * errno on error - * + * @next_fsb will keep track of the extent currently undergoing shift. + * @stop_fsb will keep track of the extent at which we have to stop. + * If we are shifting left, we will start with block (offset + len) and + * shift each extent till last extent. + * If we are shifting right, we will start with last extent inside file space + * and continue until we reach the block corresponding to offset. */ -int -xfs_collapse_file_space( - struct xfs_inode *ip, - xfs_off_t offset, - xfs_off_t len) +static int +xfs_shift_file_space( + struct xfs_inode *ip, + xfs_off_t offset, + xfs_off_t len, + enum shift_direction direction) { int done = 0; struct xfs_mount *mp = ip->i_mount; @@ -1400,21 +1397,26 @@ xfs_collapse_file_space( struct xfs_bmap_free free_list; xfs_fsblock_t first_block; int committed; - xfs_fileoff_t start_fsb; + xfs_fileoff_t stop_fsb; xfs_fileoff_t next_fsb; xfs_fileoff_t shift_fsb; - ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); + ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT); - trace_xfs_collapse_file_space(ip); + if (direction == SHIFT_LEFT) { + next_fsb = XFS_B_TO_FSB(mp, offset + len); + stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size); + } else { + /* + * If right shift, delegate the work of initialization of + * next_fsb to xfs_bmap_shift_extent as it has ilock held. 
+ */ + next_fsb = NULLFSBLOCK; + stop_fsb = XFS_B_TO_FSB(mp, offset); + } - next_fsb = XFS_B_TO_FSB(mp, offset + len); shift_fsb = XFS_B_TO_FSB(mp, len); - error = xfs_free_file_space(ip, offset, len); - if (error) - return error; - /* * Trim eofblocks to avoid shifting uninitialized post-eof preallocation * into the accessible region of the file. @@ -1427,20 +1429,28 @@ /* * Writeback and invalidate cache for the remainder of the file as we're - * about to shift down every extent from the collapse range to EOF. The - * free of the collapse range above might have already done some of - * this, but we shouldn't rely on it to do anything outside of the range - * that was freed. + * about to shift down every extent from offset to EOF. */ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, - offset + len, -1); + offset, -1); if (error) return error; error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, - (offset + len) >> PAGE_CACHE_SHIFT, -1); + offset >> PAGE_CACHE_SHIFT, -1); if (error) return error; + /* + * The extent shifting code works on extent granularity. So, if + * stop_fsb is not the starting block of an extent, we need to split + * the extent at stop_fsb. + */ + if (direction == SHIFT_RIGHT) { + error = xfs_bmap_split_extent(ip, stop_fsb); + if (error) + return error; + } + while (!error && !done) { tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); /* @@ -1464,7 +1474,7 @@ if (error) goto out; - xfs_trans_ijoin(tp, ip, 0); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_bmap_init(&free_list, &first_block); @@ -1472,10 +1482,9 @@ * We are using the write transaction in which max 2 bmbt * updates are allowed */ - start_fsb = next_fsb; - error = xfs_bmap_shift_extents(tp, ip, start_fsb, shift_fsb, - &done, &next_fsb, &first_block, &free_list, - XFS_BMAP_MAX_SHIFT_EXTENTS); + error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb, + &done, stop_fsb, &first_block, &free_list, + direction, XFS_BMAP_MAX_SHIFT_EXTENTS); if (error) goto out; @@ -1484,18 +1493,70 @@ goto out; error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); - xfs_iunlock(ip, XFS_ILOCK_EXCL); } return error; out: xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } /* + * xfs_collapse_file_space() + * This routine frees disk space and shifts extents for the given file. + * The first thing we do is to free data blocks in the specified range + * by calling xfs_free_file_space(). It would also sync dirty data + * and invalidate page cache over the region on which collapse range + * is working. And shifts extent records to the left to cover a hole. + * RETURNS: + * 0 on success + * errno on error + * + */ +int +xfs_collapse_file_space( + struct xfs_inode *ip, + xfs_off_t offset, + xfs_off_t len) +{ + int error; + + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); + trace_xfs_collapse_file_space(ip); + + error = xfs_free_file_space(ip, offset, len); + if (error) + return error; + + return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT); +} + +/* + * xfs_insert_file_space() + * This routine creates hole space by shifting extents for the given file. + * The first thing we do is to sync dirty data and invalidate page cache + * over the region on which insert range is working. And splits an extent + * into two extents at the given offset by calling xfs_bmap_split_extent.
+ * And shifts all extent records lying between [offset, + * last allocated extent] to the right to reserve the hole range. + * RETURNS: + * 0 on success + * errno on error + */ +int +xfs_insert_file_space( + struct xfs_inode *ip, + loff_t offset, + loff_t len) +{ + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); + trace_xfs_insert_file_space(ip); + + return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT); +} + +/* + * We need to check that the format of the data fork in the temporary inode is + * valid for the target inode before doing the swap. This is not a problem with + * attr1 because of the fixed fork offset, but attr2 has a dynamically sized @@ -1599,13 +1660,6 @@ xfs_swap_extent_flush( /* Verify O_DIRECT for ftmp */ if (VFS_I(ip)->i_mapping->nrpages) return -EINVAL; - - /* - * Don't try to swap extents on mmap()d files because we can't lock - * out races against page faults safely. - */ - if (mapping_mapped(VFS_I(ip)->i_mapping)) - return -EBUSY; return 0; } @@ -1633,13 +1687,14 @@ xfs_swap_extents( } /* - * Lock up the inodes against other IO and truncate to begin with. - * Then we can ensure the inodes are flushed and have no page cache - * safely. Once we have done this we can take the ilocks and do the rest - * of the checks. + * Lock the inodes against other IO, page faults and truncate to + * begin with. Then we can ensure the inodes are flushed and have no + * page cache safely. Once we have done this we can take the ilocks and + * do the rest of the checks. */ - lock_flags = XFS_IOLOCK_EXCL; + lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL); + xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL); /* Verify that both files have the same format */ if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { @@ -1666,8 +1721,16 @@ xfs_swap_extents( xfs_trans_cancel(tp, 0); goto out_unlock; } + + /* + * Lock and join the inodes to the transaction so that transaction commit + * or cancel will unlock the inodes from this point onwards. + */ xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); lock_flags |= XFS_ILOCK_EXCL; + xfs_trans_ijoin(tp, ip, lock_flags); + xfs_trans_ijoin(tp, tip, lock_flags); + /* Verify all data are being swapped */ if (sxp->sx_offset != 0 || @@ -1720,9 +1783,6 @@ xfs_swap_extents( goto out_trans_cancel; } - xfs_trans_ijoin(tp, ip, lock_flags); - xfs_trans_ijoin(tp, tip, lock_flags); - /* * Before we've swapped the forks, lets set the owners of the forks * appropriately. We have to do this as we are demand paging the btree @@ -1856,5 +1916,5 @@ out_unlock: out_trans_cancel: xfs_trans_cancel(tp, 0); - goto out_unlock; + goto out; } diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 736429a..af97d9a 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h @@ -63,6 +63,8 @@ int xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len); int xfs_collapse_file_space(struct xfs_inode *, xfs_off_t offset, xfs_off_t len); +int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset, + xfs_off_t len); /* EOF block manipulation functions */ bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force); diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 507d96a..092d652 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -537,9 +537,9 @@ xfs_buf_item_push( /* has a previous flush failed due to IO errors?
*/ if ((bp->b_flags & XBF_WRITE_FAIL) && - ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) { + ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) { xfs_warn(bp->b_target->bt_mount, -"Detected failing async write on buffer block 0x%llx. Retrying async write.", +"Failing async write on buffer block 0x%llx. Retrying async write.", (long long)bp->b_bn); } diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 799e5a2..e85a951 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -84,7 +84,7 @@ xfs_trim_extents( error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); if (error) goto out_del_cursor; - XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_del_cursor); ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest)); /* diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index 3ee186a..338e50b 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -131,7 +131,7 @@ xfs_error_report( { if (level <= xfs_error_level) { xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT, - "Internal error %s at line %d of file %s. Caller %pF", + "Internal error %s at line %d of file %s. Caller %pS", tag, linenum, filename, ra); xfs_stack_trace(); diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h index 279a76e..c0394ed 100644 --- a/fs/xfs/xfs_error.h +++ b/fs/xfs/xfs_error.h @@ -40,25 +40,25 @@ extern void xfs_verifier_error(struct xfs_buf *bp); /* * Macros to set EFSCORRUPTED & return/branch. */ -#define XFS_WANT_CORRUPTED_GOTO(x,l) \ +#define XFS_WANT_CORRUPTED_GOTO(mp, x, l) \ { \ int fs_is_ok = (x); \ ASSERT(fs_is_ok); \ if (unlikely(!fs_is_ok)) { \ XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \ - XFS_ERRLEVEL_LOW, NULL); \ + XFS_ERRLEVEL_LOW, mp); \ error = -EFSCORRUPTED; \ goto l; \ } \ } -#define XFS_WANT_CORRUPTED_RETURN(x) \ +#define XFS_WANT_CORRUPTED_RETURN(mp, x) \ { \ int fs_is_ok = (x); \ ASSERT(fs_is_ok); \ if (unlikely(!fs_is_ok)) { \ XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \ - XFS_ERRLEVEL_LOW, NULL); \ + XFS_ERRLEVEL_LOW, mp); \ return -EFSCORRUPTED; \ } \ } diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index b97359b..652cd3c 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -215,7 +215,7 @@ xfs_fs_get_parent( int error; struct xfs_inode *cip; - error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL); + error = xfs_lookup(XFS_I(d_inode(child)), &xfs_name_dotdot, &cip, NULL); if (unlikely(error)) return ERR_PTR(error); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index a2e1cb8..8121e75 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -38,7 +38,6 @@ #include "xfs_icache.h" #include "xfs_pnfs.h" -#include <linux/aio.h> #include <linux/dcache.h> #include <linux/falloc.h> #include <linux/pagevec.h> @@ -280,7 +279,7 @@ xfs_file_read_iter( XFS_STATS_INC(xs_read_calls); - if (unlikely(file->f_flags & O_DIRECT)) + if (unlikely(iocb->ki_flags & IOCB_DIRECT)) ioflags |= XFS_IO_ISDIRECT; if (file->f_mode & FMODE_NOCMTIME) ioflags |= XFS_IO_INVIS; @@ -545,21 +544,22 @@ xfs_zero_eof( */ STATIC ssize_t xfs_file_aio_write_checks( - struct file *file, - loff_t *pos, - size_t *count, + struct kiocb *iocb, + struct iov_iter *from, int *iolock) { + struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); - int error = 0; + ssize_t error = 0; + size_t count = iov_iter_count(from); restart: - error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); - if (error) + error = generic_write_checks(iocb, from); + if 
(error <= 0) return error; - error = xfs_break_layouts(inode, iolock); + error = xfs_break_layouts(inode, iolock, true); if (error) return error; @@ -569,20 +569,42 @@ restart: * write. If zeroing is needed and we are currently holding the * iolock shared, we need to update it to exclusive which implies * having to redo all checks before. + * + * We need to serialise against EOF updates that occur in IO + * completions here. We want to make sure that nobody is changing the + * size while we do this check until we have placed an IO barrier (i.e. + * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched. + * The spinlock effectively forms a memory barrier once we have the + * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value + * and hence be able to correctly determine if we need to run zeroing. */ - if (*pos > i_size_read(inode)) { + spin_lock(&ip->i_flags_lock); + if (iocb->ki_pos > i_size_read(inode)) { bool zero = false; + spin_unlock(&ip->i_flags_lock); if (*iolock == XFS_IOLOCK_SHARED) { xfs_rw_iunlock(ip, *iolock); *iolock = XFS_IOLOCK_EXCL; xfs_rw_ilock(ip, *iolock); + iov_iter_reexpand(from, count); + + /* + * We now have an IO submission barrier in place, but + * AIO can do EOF updates during IO completion and hence + * we now need to wait for all of them to drain. Non-AIO + * DIO will have drained before we are given the + * XFS_IOLOCK_EXCL, and so for most cases this wait is a + * no-op. + */ + inode_dio_wait(inode); goto restart; } - error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero); + error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero); if (error) return error; - } + } else + spin_unlock(&ip->i_flags_lock); /* * Updating the timestamps will grab the ilock again from @@ -644,6 +666,8 @@ xfs_file_dio_aio_write( int iolock; size_t count = iov_iter_count(from); loff_t pos = iocb->ki_pos; + loff_t end; + struct iov_iter data; struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? 
mp->m_rtdev_targp : mp->m_ddev_targp; @@ -679,14 +703,16 @@ xfs_file_dio_aio_write( xfs_rw_ilock(ip, iolock); } - ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); + ret = xfs_file_aio_write_checks(iocb, from, &iolock); if (ret) goto out; - iov_iter_truncate(from, count); + count = iov_iter_count(from); + pos = iocb->ki_pos; + end = pos + count - 1; if (mapping->nrpages) { ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, - pos, pos + count - 1); + pos, end); if (ret) goto out; /* @@ -696,7 +722,7 @@ xfs_file_dio_aio_write( */ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, pos >> PAGE_CACHE_SHIFT, - (pos + count - 1) >> PAGE_CACHE_SHIFT); + end >> PAGE_CACHE_SHIFT); WARN_ON_ONCE(ret); ret = 0; } @@ -713,8 +739,22 @@ xfs_file_dio_aio_write( } trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); - ret = generic_file_direct_write(iocb, from, pos); + data = *from; + ret = mapping->a_ops->direct_IO(iocb, &data, pos); + + /* see generic_file_direct_write() for why this is necessary */ + if (mapping->nrpages) { + invalidate_inode_pages2_range(mapping, + pos >> PAGE_CACHE_SHIFT, + end >> PAGE_CACHE_SHIFT); + } + + if (ret > 0) { + pos += ret; + iov_iter_advance(from, ret); + iocb->ki_pos = pos; + } out: xfs_rw_iunlock(ip, iolock); @@ -735,24 +775,22 @@ xfs_file_buffered_aio_write( ssize_t ret; int enospc = 0; int iolock = XFS_IOLOCK_EXCL; - loff_t pos = iocb->ki_pos; - size_t count = iov_iter_count(from); xfs_rw_ilock(ip, iolock); - ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); + ret = xfs_file_aio_write_checks(iocb, from, &iolock); if (ret) goto out; - iov_iter_truncate(from, count); /* We can write back this queue in page reclaim */ current->backing_dev_info = inode_to_bdi(inode); write_retry: - trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); - ret = generic_perform_write(file, from, pos); + trace_xfs_file_buffered_write(ip, iov_iter_count(from), + iocb->ki_pos, 0); + ret = generic_perform_write(file, from, iocb->ki_pos); if (likely(ret >= 0)) - iocb->ki_pos = pos + ret; + iocb->ki_pos += ret; /* * If we hit a space limit, try to free up some lingering preallocated @@ -804,7 +842,7 @@ xfs_file_write_iter( if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return -EIO; - if (unlikely(file->f_flags & O_DIRECT)) + if (unlikely(iocb->ki_flags & IOCB_DIRECT)) ret = xfs_file_dio_aio_write(iocb, from); else ret = xfs_file_buffered_aio_write(iocb, from); @@ -822,6 +860,11 @@ xfs_file_write_iter( return ret; } +#define XFS_FALLOC_FL_SUPPORTED \ + (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \ + FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \ + FALLOC_FL_INSERT_RANGE) + STATIC long xfs_file_fallocate( struct file *file, @@ -835,18 +878,21 @@ xfs_file_fallocate( enum xfs_prealloc_flags flags = 0; uint iolock = XFS_IOLOCK_EXCL; loff_t new_size = 0; + bool do_file_insert = 0; if (!S_ISREG(inode->i_mode)) return -EINVAL; - if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | - FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) + if (mode & ~XFS_FALLOC_FL_SUPPORTED) return -EOPNOTSUPP; xfs_ilock(ip, iolock); - error = xfs_break_layouts(inode, &iolock); + error = xfs_break_layouts(inode, &iolock, false); if (error) goto out_unlock; + xfs_ilock(ip, XFS_MMAPLOCK_EXCL); + iolock |= XFS_MMAPLOCK_EXCL; + if (mode & FALLOC_FL_PUNCH_HOLE) { error = xfs_free_file_space(ip, offset, len); if (error) @@ -873,6 +919,27 @@ xfs_file_fallocate( error = xfs_collapse_file_space(ip, offset, len); if (error) goto out_unlock; + } else if (mode & FALLOC_FL_INSERT_RANGE) { + 
unsigned blksize_mask = (1 << inode->i_blkbits) - 1; + + new_size = i_size_read(inode) + len; + if (offset & blksize_mask || len & blksize_mask) { + error = -EINVAL; + goto out_unlock; + } + + /* check the new inode size does not wrap through zero */ + if (new_size > inode->i_sb->s_maxbytes) { + error = -EFBIG; + goto out_unlock; + } + + /* Offset should be less than i_size */ + if (offset >= i_size_read(inode)) { + error = -EINVAL; + goto out_unlock; + } + do_file_insert = 1; } else { flags |= XFS_PREALLOC_SET; @@ -907,8 +974,19 @@ xfs_file_fallocate( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = new_size; error = xfs_setattr_size(ip, &iattr); + if (error) + goto out_unlock; } + /* + * Perform hole insertion now that the file size has been + * updated so that if we crash during the operation we don't + * leave shifted extents past EOF and hence losing access to + * the data that is contained within them. + */ + if (do_file_insert) + error = xfs_insert_file_space(ip, offset, len); + out_unlock: xfs_iunlock(ip, iolock); return error; @@ -997,20 +1075,6 @@ xfs_file_mmap( } /* - * mmap()d file has taken write protection fault and is being made - * writable. We can set the page state up correctly for a writable - * page, which means we can do correct delalloc accounting (ENOSPC - * checking!) and unwritten extent mapping. - */ -STATIC int -xfs_vm_page_mkwrite( - struct vm_area_struct *vma, - struct vm_fault *vmf) -{ - return block_page_mkwrite(vma, vmf, xfs_get_blocks); -} - -/* * This type is designed to indicate the type of offset we would like * to search from page cache for xfs_seek_hole_data(). */ @@ -1385,10 +1449,57 @@ xfs_file_llseek( } } +/* + * Locking for serialisation of IO during page faults. This results in a lock + * ordering of: + * + * mmap_sem (MM) + * i_mmap_lock (XFS - truncate serialisation) + * page_lock (MM) + * i_lock (XFS - extent map serialisation) + */ +STATIC int +xfs_filemap_fault( + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct xfs_inode *ip = XFS_I(vma->vm_file->f_mapping->host); + int error; + + trace_xfs_filemap_fault(ip); + + xfs_ilock(ip, XFS_MMAPLOCK_SHARED); + error = filemap_fault(vma, vmf); + xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); + + return error; +} + +/* + * mmap()d file has taken write protection fault and is being made writable. We + * can set the page state up correctly for a writable page, which means we can + * do correct delalloc accounting (ENOSPC checking!) and unwritten extent + * mapping. 
+ */ +STATIC int +xfs_filemap_page_mkwrite( + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct xfs_inode *ip = XFS_I(vma->vm_file->f_mapping->host); + int error; + + trace_xfs_filemap_page_mkwrite(ip); + + xfs_ilock(ip, XFS_MMAPLOCK_SHARED); + error = block_page_mkwrite(vma, vmf, xfs_get_blocks); + xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); + + return error; +} + const struct file_operations xfs_file_operations = { .llseek = xfs_file_llseek, - .read = new_sync_read, - .write = new_sync_write, .read_iter = xfs_file_read_iter, .write_iter = xfs_file_write_iter, .splice_read = xfs_file_splice_read, @@ -1417,7 +1528,7 @@ const struct file_operations xfs_dir_file_operations = { }; static const struct vm_operations_struct xfs_file_vm_ops = { - .fault = filemap_fault, + .fault = xfs_filemap_fault, .map_pages = filemap_map_pages, - .page_mkwrite = xfs_vm_page_mkwrite, + .page_mkwrite = xfs_filemap_page_mkwrite, }; diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index a2e86e8..da82f1c 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c @@ -294,7 +294,7 @@ xfs_filestream_get_parent( if (!parent) goto out_dput; - dir = igrab(parent->d_inode); + dir = igrab(d_inode(parent)); dput(parent); out_dput: @@ -322,7 +322,7 @@ xfs_filestream_lookup_ag( pip = xfs_filestream_get_parent(ip); if (!pip) - goto out; + return NULLAGNUMBER; mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino); if (mru) { diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 74efe5b..cb7e8a2 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -637,12 +637,13 @@ xfs_fs_counts( xfs_mount_t *mp, xfs_fsop_counts_t *cnt) { - xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); + cnt->allocino = percpu_counter_read_positive(&mp->m_icount); + cnt->freeino = percpu_counter_read_positive(&mp->m_ifree); + cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) - + XFS_ALLOC_SET_ASIDE(mp); + spin_lock(&mp->m_sb_lock); - cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); cnt->freertx = mp->m_sb.sb_frextents; - cnt->freeino = mp->m_sb.sb_ifree; - cnt->allocino = mp->m_sb.sb_icount; spin_unlock(&mp->m_sb_lock); return 0; } @@ -692,14 +693,9 @@ xfs_reserve_blocks( * what to do. This means that the amount of free space can * change while we do this, so we need to retry if we end up * trying to reserve more space than is available. - * - * We also use the xfs_mod_incore_sb() interface so that we - * don't have to care about whether per cpu counter are - * enabled, disabled or even compiled in.... */ retry: spin_lock(&mp->m_sb_lock); - xfs_icsb_sync_counters_locked(mp, 0); /* * If our previous reservation was larger than the current value, @@ -716,7 +712,8 @@ retry: } else { __int64_t free; - free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); + free = percpu_counter_sum(&mp->m_fdblocks) - + XFS_ALLOC_SET_ASIDE(mp); if (!free) goto out; /* ENOSPC and fdblks_delta = 0 */ @@ -755,8 +752,7 @@ out: * the extra reserve blocks from the reserve..... */ int error; - error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, - fdblks_delta, 0); + error = xfs_mod_fdblocks(mp, fdblks_delta, 0); if (error == -ENOSPC) goto retry; } diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 9771b7e..76a9f278 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -439,11 +439,11 @@ again: *ipp = ip; /* - * If we have a real type for an on-disk inode, we can set ops(&unlock) + * If we have a real type for an on-disk inode, we can setup the inode * now. 
If it's a new inode being created, xfs_ialloc will handle it. */ if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0) - xfs_setup_inode(ip); + xfs_setup_existing_inode(ip); return 0; out_error_or_again: diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 6163767..d6ebc85 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -117,24 +117,34 @@ xfs_ilock_attr_map_shared( } /* - * The xfs inode contains 2 locks: a multi-reader lock called the - * i_iolock and a multi-reader lock called the i_lock. This routine - * allows either or both of the locks to be obtained. + * The xfs inode contains 3 multi-reader locks: the i_iolock the i_mmap_lock and + * the i_lock. This routine allows various combinations of the locks to be + * obtained. * - * The 2 locks should always be ordered so that the IO lock is - * obtained first in order to prevent deadlock. + * The 3 locks should always be ordered so that the IO lock is obtained first, + * the mmap lock second and the ilock last in order to prevent deadlock. * - * ip -- the inode being locked - * lock_flags -- this parameter indicates the inode's locks - * to be locked. It can be: - * XFS_IOLOCK_SHARED, - * XFS_IOLOCK_EXCL, - * XFS_ILOCK_SHARED, - * XFS_ILOCK_EXCL, - * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED, - * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL, - * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED, - * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL + * Basic locking order: + * + * i_iolock -> i_mmap_lock -> page_lock -> i_ilock + * + * mmap_sem locking order: + * + * i_iolock -> page lock -> mmap_sem + * mmap_sem -> i_mmap_lock -> page_lock + * + * The difference in mmap_sem locking order mean that we cannot hold the + * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can + * fault in pages during copy in/out (for buffered IO) or require the mmap_sem + * in get_user_pages() to map the user pages into the kernel address space for + * direct IO. Similarly the i_iolock cannot be taken inside a page fault because + * page faults already hold the mmap_sem. + * + * Hence to serialise fully against both syscall and mmap based IO, we need to + * take both the i_iolock and the i_mmap_lock. These locks should *only* be both + * taken in places where we need to invalidate the page cache in a race + * free manner (e.g. truncate, hole punch and other extent manipulation + * functions). 
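A condensed sketch of the rule this comment establishes, modelled on the fallocate and collapse paths elsewhere in this patch (a fragment for illustration only, with error handling and the pNFS layout break elided): paths that punch, collapse or otherwise manipulate extents take both the IO lock and the new mmap lock exclusively, so that neither syscall-based IO nor page faults can repopulate the page cache while it is being invalidated.

/* Serialise against both read()/write() and page faults, then do the work. */
xfs_ilock(ip, XFS_IOLOCK_EXCL);		/* blocks new syscall-based IO */
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);	/* blocks new page faults */

error = xfs_collapse_file_space(ip, offset, len);

xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);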
*/ void xfs_ilock( @@ -150,6 +160,8 @@ xfs_ilock( */ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != + (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); @@ -159,6 +171,11 @@ xfs_ilock( else if (lock_flags & XFS_IOLOCK_SHARED) mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); + if (lock_flags & XFS_MMAPLOCK_EXCL) + mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags)); + else if (lock_flags & XFS_MMAPLOCK_SHARED) + mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags)); + if (lock_flags & XFS_ILOCK_EXCL) mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); else if (lock_flags & XFS_ILOCK_SHARED) @@ -191,6 +208,8 @@ xfs_ilock_nowait( */ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != + (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); @@ -202,21 +221,35 @@ xfs_ilock_nowait( if (!mrtryaccess(&ip->i_iolock)) goto out; } + + if (lock_flags & XFS_MMAPLOCK_EXCL) { + if (!mrtryupdate(&ip->i_mmaplock)) + goto out_undo_iolock; + } else if (lock_flags & XFS_MMAPLOCK_SHARED) { + if (!mrtryaccess(&ip->i_mmaplock)) + goto out_undo_iolock; + } + if (lock_flags & XFS_ILOCK_EXCL) { if (!mrtryupdate(&ip->i_lock)) - goto out_undo_iolock; + goto out_undo_mmaplock; } else if (lock_flags & XFS_ILOCK_SHARED) { if (!mrtryaccess(&ip->i_lock)) - goto out_undo_iolock; + goto out_undo_mmaplock; } return 1; - out_undo_iolock: +out_undo_mmaplock: + if (lock_flags & XFS_MMAPLOCK_EXCL) + mrunlock_excl(&ip->i_mmaplock); + else if (lock_flags & XFS_MMAPLOCK_SHARED) + mrunlock_shared(&ip->i_mmaplock); +out_undo_iolock: if (lock_flags & XFS_IOLOCK_EXCL) mrunlock_excl(&ip->i_iolock); else if (lock_flags & XFS_IOLOCK_SHARED) mrunlock_shared(&ip->i_iolock); - out: +out: return 0; } @@ -244,6 +277,8 @@ xfs_iunlock( */ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != + (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); @@ -254,6 +289,11 @@ xfs_iunlock( else if (lock_flags & XFS_IOLOCK_SHARED) mrunlock_shared(&ip->i_iolock); + if (lock_flags & XFS_MMAPLOCK_EXCL) + mrunlock_excl(&ip->i_mmaplock); + else if (lock_flags & XFS_MMAPLOCK_SHARED) + mrunlock_shared(&ip->i_mmaplock); + if (lock_flags & XFS_ILOCK_EXCL) mrunlock_excl(&ip->i_lock); else if (lock_flags & XFS_ILOCK_SHARED) @@ -271,11 +311,14 @@ xfs_ilock_demote( xfs_inode_t *ip, uint lock_flags) { - ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)); - ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); + ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)); + ASSERT((lock_flags & + ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); if (lock_flags & XFS_ILOCK_EXCL) mrdemote(&ip->i_lock); + if (lock_flags & XFS_MMAPLOCK_EXCL) + mrdemote(&ip->i_mmaplock); if (lock_flags & 
XFS_IOLOCK_EXCL) mrdemote(&ip->i_iolock); @@ -294,6 +337,12 @@ xfs_isilocked( return rwsem_is_locked(&ip->i_lock.mr_lock); } + if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) { + if (!(lock_flags & XFS_MMAPLOCK_SHARED)) + return !!ip->i_mmaplock.mr_writer; + return rwsem_is_locked(&ip->i_mmaplock.mr_lock); + } + if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) { if (!(lock_flags & XFS_IOLOCK_SHARED)) return !!ip->i_iolock.mr_writer; @@ -314,14 +363,27 @@ int xfs_lock_delays; #endif /* - * Bump the subclass so xfs_lock_inodes() acquires each lock with - * a different value + * Bump the subclass so xfs_lock_inodes() acquires each lock with a different + * value. This shouldn't be called for page fault locking, but we also need to + * ensure we don't overrun the number of lockdep subclasses for the iolock or + * mmaplock as that is limited to 12 by the mmap lock lockdep annotations. */ static inline int xfs_lock_inumorder(int lock_mode, int subclass) { - if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) + if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) { + ASSERT(subclass + XFS_LOCK_INUMORDER < + (1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT))); lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT; + } + + if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) { + ASSERT(subclass + XFS_LOCK_INUMORDER < + (1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT))); + lock_mode |= (subclass + XFS_LOCK_INUMORDER) << + XFS_MMAPLOCK_SHIFT; + } + if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT; @@ -329,15 +391,14 @@ xfs_lock_inumorder(int lock_mode, int subclass) } /* - * The following routine will lock n inodes in exclusive mode. - * We assume the caller calls us with the inodes in i_ino order. + * The following routine will lock n inodes in exclusive mode. We assume the + * caller calls us with the inodes in i_ino order. * - * We need to detect deadlock where an inode that we lock - * is in the AIL and we start waiting for another inode that is locked - * by a thread in a long running transaction (such as truncate). This can - * result in deadlock since the long running trans might need to wait - * for the inode we just locked in order to push the tail and free space - * in the log. + * We need to detect deadlock where an inode that we lock is in the AIL and we + * start waiting for another inode that is locked by a thread in a long running + * transaction (such as truncate). This can result in deadlock since the long + * running trans might need to wait for the inode we just locked in order to + * push the tail and free space in the log. */ void xfs_lock_inodes( @@ -348,30 +409,27 @@ xfs_lock_inodes( int attempts = 0, i, j, try_lock; xfs_log_item_t *lp; - ASSERT(ips && (inodes >= 2)); /* we need at least two */ + /* currently supports between 2 and 5 inodes */ + ASSERT(ips && inodes >= 2 && inodes <= 5); try_lock = 0; i = 0; - again: for (; i < inodes; i++) { ASSERT(ips[i]); - if (i && (ips[i] == ips[i-1])) /* Already locked */ + if (i && (ips[i] == ips[i - 1])) /* Already locked */ continue; /* - * If try_lock is not set yet, make sure all locked inodes - * are not in the AIL. - * If any are, set try_lock to be used later. + * If try_lock is not set yet, make sure all locked inodes are + * not in the AIL. If any are, set try_lock to be used later. 
*/ - if (!try_lock) { for (j = (i - 1); j >= 0 && !try_lock; j--) { lp = (xfs_log_item_t *)ips[j]->i_itemp; - if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { + if (lp && (lp->li_flags & XFS_LI_IN_AIL)) try_lock++; - } } } @@ -381,51 +439,42 @@ again: * we can't get any, we must release all we have * and try again. */ + if (!try_lock) { + xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i)); + continue; + } + + /* try_lock means we have an inode locked that is in the AIL. */ + ASSERT(i != 0); + if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) + continue; - if (try_lock) { - /* try_lock must be 0 if i is 0. */ + /* + * Unlock all previous guys and try again. xfs_iunlock will try + * to push the tail if the inode is in the AIL. + */ + attempts++; + for (j = i - 1; j >= 0; j--) { /* - * try_lock means we have an inode locked - * that is in the AIL. + * Check to see if we've already unlocked this one. Not + * the first one going back, and the inode ptr is the + * same. */ - ASSERT(i != 0); - if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) { - attempts++; - - /* - * Unlock all previous guys and try again. - * xfs_iunlock will try to push the tail - * if the inode is in the AIL. - */ - - for(j = i - 1; j >= 0; j--) { - - /* - * Check to see if we've already - * unlocked this one. - * Not the first one going back, - * and the inode ptr is the same. - */ - if ((j != (i - 1)) && ips[j] == - ips[j+1]) - continue; - - xfs_iunlock(ips[j], lock_mode); - } + if (j != (i - 1) && ips[j] == ips[j + 1]) + continue; + + xfs_iunlock(ips[j], lock_mode); + } - if ((attempts % 5) == 0) { - delay(1); /* Don't just spin the CPU */ + if ((attempts % 5) == 0) { + delay(1); /* Don't just spin the CPU */ #ifdef DEBUG - xfs_lock_delays++; + xfs_lock_delays++; #endif - } - i = 0; - try_lock = 0; - goto again; - } - } else { - xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i)); } + i = 0; + try_lock = 0; + goto again; } #ifdef DEBUG @@ -440,10 +489,10 @@ again: } /* - * xfs_lock_two_inodes() can only be used to lock one type of lock - * at a time - the iolock or the ilock, but not both at once. If - * we lock both at once, lockdep will report false positives saying - * we have violated locking orders. + * xfs_lock_two_inodes() can only be used to lock one type of lock at a time - + * the iolock, the mmaplock or the ilock, but not more than one at a time. If we + * lock more than one at a time, lockdep will report false positives saying we + * have violated locking orders. 
*/ void xfs_lock_two_inodes( @@ -455,8 +504,12 @@ xfs_lock_two_inodes( int attempts = 0; xfs_log_item_t *lp; - if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) - ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0); + if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) { + ASSERT(!(lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))); + ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))); + } else if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) + ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))); + ASSERT(ip0->i_ino != ip1->i_ino); if (ip0->i_ino > ip1->i_ino) { @@ -818,7 +871,7 @@ xfs_ialloc( xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_trans_log_inode(tp, ip, flags); - /* now that we have an i_mode we can setup inode ops and unlock */ + /* now that we have an i_mode we can setup the inode structure */ xfs_setup_inode(ip); *ipp = ip; @@ -1235,12 +1288,14 @@ xfs_create( xfs_trans_cancel(tp, cancel_flags); out_release_inode: /* - * Wait until after the current transaction is aborted to - * release the inode. This prevents recursive transactions - * and deadlocks from xfs_inactive. + * Wait until after the current transaction is aborted to finish the + * setup of the inode and release the inode. This prevents recursive + * transactions and deadlocks from xfs_inactive. */ - if (ip) + if (ip) { + xfs_finish_inode_setup(ip); IRELE(ip); + } xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); @@ -1345,12 +1400,14 @@ xfs_create_tmpfile( xfs_trans_cancel(tp, cancel_flags); out_release_inode: /* - * Wait until after the current transaction is aborted to - * release the inode. This prevents recursive transactions - * and deadlocks from xfs_inactive. + * Wait until after the current transaction is aborted to finish the + * setup of the inode and release the inode. This prevents recursive + * transactions and deadlocks from xfs_inactive. */ - if (ip) + if (ip) { + xfs_finish_inode_setup(ip); IRELE(ip); + } xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); @@ -2611,19 +2668,22 @@ xfs_remove( /* * Enter all inodes for a rename transaction into a sorted array. */ +#define __XFS_SORT_INODES 5 STATIC void xfs_sort_for_rename( - xfs_inode_t *dp1, /* in: old (source) directory inode */ - xfs_inode_t *dp2, /* in: new (target) directory inode */ - xfs_inode_t *ip1, /* in: inode of old entry */ - xfs_inode_t *ip2, /* in: inode of new entry, if it - already exists, NULL otherwise. */ - xfs_inode_t **i_tab,/* out: array of inode returned, sorted */ - int *num_inodes) /* out: number of inodes in array */ + struct xfs_inode *dp1, /* in: old (source) directory inode */ + struct xfs_inode *dp2, /* in: new (target) directory inode */ + struct xfs_inode *ip1, /* in: inode of old entry */ + struct xfs_inode *ip2, /* in: inode of new entry */ + struct xfs_inode *wip, /* in: whiteout inode */ + struct xfs_inode **i_tab,/* out: sorted array of inodes */ + int *num_inodes) /* in/out: inodes in array */ { - xfs_inode_t *temp; int i, j; + ASSERT(*num_inodes == __XFS_SORT_INODES); + memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); + /* * i_tab contains a list of pointers to inodes. We initialize * the table here & we'll sort it. We will then use it to @@ -2631,25 +2691,24 @@ xfs_sort_for_rename( * * Note that the table may contain duplicates. e.g., dp1 == dp2. 
*/ - i_tab[0] = dp1; - i_tab[1] = dp2; - i_tab[2] = ip1; - if (ip2) { - *num_inodes = 4; - i_tab[3] = ip2; - } else { - *num_inodes = 3; - i_tab[3] = NULL; - } + i = 0; + i_tab[i++] = dp1; + i_tab[i++] = dp2; + i_tab[i++] = ip1; + if (ip2) + i_tab[i++] = ip2; + if (wip) + i_tab[i++] = wip; + *num_inodes = i; /* * Sort the elements via bubble sort. (Remember, there are at - * most 4 elements to sort, so this is adequate.) + * most 5 elements to sort, so this is adequate.) */ for (i = 0; i < *num_inodes; i++) { for (j = 1; j < *num_inodes; j++) { if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { - temp = i_tab[j]; + struct xfs_inode *temp = i_tab[j]; i_tab[j] = i_tab[j-1]; i_tab[j-1] = temp; } @@ -2657,6 +2716,31 @@ xfs_sort_for_rename( } } +static int +xfs_finish_rename( + struct xfs_trans *tp, + struct xfs_bmap_free *free_list) +{ + int committed = 0; + int error; + + /* + * If this is a synchronous mount, make sure that the rename transaction + * goes to disk before returning to the user. + */ + if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) + xfs_trans_set_sync(tp); + + error = xfs_bmap_finish(&tp, free_list, &committed); + if (error) { + xfs_bmap_cancel(free_list); + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); + return error; + } + + return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); +} + /* * xfs_cross_rename() * @@ -2685,14 +2769,14 @@ xfs_cross_rename( ip2->i_ino, first_block, free_list, spaceres); if (error) - goto out; + goto out_trans_abort; /* Swap inode number for dirent in second parent */ error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, first_block, free_list, spaceres); if (error) - goto out; + goto out_trans_abort; /* * If we're renaming one or more directories across different parents, @@ -2707,16 +2791,16 @@ xfs_cross_rename( dp1->i_ino, first_block, free_list, spaceres); if (error) - goto out; + goto out_trans_abort; /* transfer ip2 ".." reference to dp1 */ if (!S_ISDIR(ip1->i_d.di_mode)) { error = xfs_droplink(tp, dp2); if (error) - goto out; + goto out_trans_abort; error = xfs_bumplink(tp, dp1); if (error) - goto out; + goto out_trans_abort; } /* @@ -2734,16 +2818,16 @@ xfs_cross_rename( dp2->i_ino, first_block, free_list, spaceres); if (error) - goto out; + goto out_trans_abort; /* transfer ip1 ".." reference to dp2 */ if (!S_ISDIR(ip2->i_d.di_mode)) { error = xfs_droplink(tp, dp1); if (error) - goto out; + goto out_trans_abort; error = xfs_bumplink(tp, dp2); if (error) - goto out; + goto out_trans_abort; } /* @@ -2771,66 +2855,108 @@ xfs_cross_rename( } xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); -out: + return xfs_finish_rename(tp, free_list); + +out_trans_abort: + xfs_bmap_cancel(free_list); + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); return error; } /* + * xfs_rename_alloc_whiteout() + * + * Return a referenced, unlinked, unlocked inode that that can be used as a + * whiteout in a rename transaction. We use a tmpfile inode here so that if we + * crash between allocating the inode and linking it into the rename transaction + * recovery will free the inode and we won't leak it. 
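For context, the whiteout path added below is reached from user space via renameat2(2) with RENAME_WHITEOUT, which overlayfs uses when removing files that exist on a lower layer. A rough sketch with hypothetical paths, assuming kernel headers that define SYS_renameat2 and calling the syscall directly because older glibc versions do not provide a renameat2() wrapper:

/* Hypothetical demo: move a file and leave a whiteout at the old name. */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT	(1 << 2)	/* from include/uapi/linux/fs.h */
#endif

int main(void)
{
	if (syscall(SYS_renameat2, AT_FDCWD, "upper/file",
		    AT_FDCWD, "upper/file.moved", RENAME_WHITEOUT) < 0) {
		perror("renameat2(RENAME_WHITEOUT)");
		return 1;
	}

	/* "upper/file" now exists as a 0:0 character device node (the whiteout). */
	return 0;
}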
+ */ +static int +xfs_rename_alloc_whiteout( + struct xfs_inode *dp, + struct xfs_inode **wip) +{ + struct xfs_inode *tmpfile; + int error; + + error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile); + if (error) + return error; + + /* Satisfy xfs_bumplink that this is a real tmpfile */ + xfs_finish_inode_setup(tmpfile); + VFS_I(tmpfile)->i_state |= I_LINKABLE; + + *wip = tmpfile; + return 0; +} + +/* * xfs_rename */ int xfs_rename( - xfs_inode_t *src_dp, - struct xfs_name *src_name, - xfs_inode_t *src_ip, - xfs_inode_t *target_dp, - struct xfs_name *target_name, - xfs_inode_t *target_ip, - unsigned int flags) + struct xfs_inode *src_dp, + struct xfs_name *src_name, + struct xfs_inode *src_ip, + struct xfs_inode *target_dp, + struct xfs_name *target_name, + struct xfs_inode *target_ip, + unsigned int flags) { - xfs_trans_t *tp = NULL; - xfs_mount_t *mp = src_dp->i_mount; - int new_parent; /* moving to a new dir */ - int src_is_directory; /* src_name is a directory */ - int error; - xfs_bmap_free_t free_list; - xfs_fsblock_t first_block; - int cancel_flags; - int committed; - xfs_inode_t *inodes[4]; - int spaceres; - int num_inodes; + struct xfs_mount *mp = src_dp->i_mount; + struct xfs_trans *tp; + struct xfs_bmap_free free_list; + xfs_fsblock_t first_block; + struct xfs_inode *wip = NULL; /* whiteout inode */ + struct xfs_inode *inodes[__XFS_SORT_INODES]; + int num_inodes = __XFS_SORT_INODES; + bool new_parent = (src_dp != target_dp); + bool src_is_directory = S_ISDIR(src_ip->i_d.di_mode); + int cancel_flags = 0; + int spaceres; + int error; trace_xfs_rename(src_dp, target_dp, src_name, target_name); - new_parent = (src_dp != target_dp); - src_is_directory = S_ISDIR(src_ip->i_d.di_mode); + if ((flags & RENAME_EXCHANGE) && !target_ip) + return -EINVAL; - xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, + /* + * If we are doing a whiteout operation, allocate the whiteout inode + * we will be placing at the target and ensure the type is set + * appropriately. + */ + if (flags & RENAME_WHITEOUT) { + ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); + error = xfs_rename_alloc_whiteout(target_dp, &wip); + if (error) + return error; + + /* setup target dirent info as whiteout */ + src_name->type = XFS_DIR3_FT_CHRDEV; + } + + xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, inodes, &num_inodes); - xfs_bmap_init(&free_list, &first_block); tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); - cancel_flags = XFS_TRANS_RELEASE_LOG_RES; spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0); if (error == -ENOSPC) { spaceres = 0; error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0); } - if (error) { - xfs_trans_cancel(tp, 0); - goto std_return; - } + if (error) + goto out_trans_cancel; + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; /* * Attach the dquots to the inodes */ error = xfs_qm_vop_rename_dqattach(inodes); - if (error) { - xfs_trans_cancel(tp, cancel_flags); - goto std_return; - } + if (error) + goto out_trans_cancel; /* * Lock all the participating inodes. 
Depending upon whether @@ -2851,6 +2977,8 @@ xfs_rename( xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); if (target_ip) xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); + if (wip) + xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); /* * If we are using project inheritance, we only allow renames @@ -2860,24 +2988,16 @@ xfs_rename( if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { error = -EXDEV; - goto error_return; + goto out_trans_cancel; } - /* - * Handle RENAME_EXCHANGE flags - */ - if (flags & RENAME_EXCHANGE) { - if (target_ip == NULL) { - error = -EINVAL; - goto error_return; - } - error = xfs_cross_rename(tp, src_dp, src_name, src_ip, - target_dp, target_name, target_ip, - &free_list, &first_block, spaceres); - if (error) - goto abort_return; - goto finish_rename; - } + xfs_bmap_init(&free_list, &first_block); + + /* RENAME_EXCHANGE is unique from here on. */ + if (flags & RENAME_EXCHANGE) + return xfs_cross_rename(tp, src_dp, src_name, src_ip, + target_dp, target_name, target_ip, + &free_list, &first_block, spaceres); /* * Set up the target. @@ -2890,7 +3010,7 @@ xfs_rename( if (!spaceres) { error = xfs_dir_canenter(tp, target_dp, target_name); if (error) - goto error_return; + goto out_trans_cancel; } /* * If target does not exist and the rename crosses @@ -2901,9 +3021,9 @@ xfs_rename( src_ip->i_ino, &first_block, &free_list, spaceres); if (error == -ENOSPC) - goto error_return; + goto out_bmap_cancel; if (error) - goto abort_return; + goto out_trans_abort; xfs_trans_ichgtime(tp, target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); @@ -2911,7 +3031,7 @@ xfs_rename( if (new_parent && src_is_directory) { error = xfs_bumplink(tp, target_dp); if (error) - goto abort_return; + goto out_trans_abort; } } else { /* target_ip != NULL */ /* @@ -2926,7 +3046,7 @@ xfs_rename( if (!(xfs_dir_isempty(target_ip)) || (target_ip->i_d.di_nlink > 2)) { error = -EEXIST; - goto error_return; + goto out_trans_cancel; } } @@ -2943,7 +3063,7 @@ xfs_rename( src_ip->i_ino, &first_block, &free_list, spaceres); if (error) - goto abort_return; + goto out_trans_abort; xfs_trans_ichgtime(tp, target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); @@ -2954,7 +3074,7 @@ xfs_rename( */ error = xfs_droplink(tp, target_ip); if (error) - goto abort_return; + goto out_trans_abort; if (src_is_directory) { /* @@ -2962,7 +3082,7 @@ xfs_rename( */ error = xfs_droplink(tp, target_ip); if (error) - goto abort_return; + goto out_trans_abort; } } /* target_ip != NULL */ @@ -2979,7 +3099,7 @@ xfs_rename( &first_block, &free_list, spaceres); ASSERT(error != -EEXIST); if (error) - goto abort_return; + goto out_trans_abort; } /* @@ -3005,49 +3125,67 @@ xfs_rename( */ error = xfs_droplink(tp, src_dp); if (error) - goto abort_return; + goto out_trans_abort; } - error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, + /* + * For whiteouts, we only need to update the source dirent with the + * inode number of the whiteout inode rather than removing it + * altogether. 
+ */ + if (wip) { + error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, &first_block, &free_list, spaceres); + } else + error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, + &first_block, &free_list, spaceres); if (error) - goto abort_return; - - xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); - if (new_parent) - xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); + goto out_trans_abort; -finish_rename: /* - * If this is a synchronous mount, make sure that the - * rename transaction goes to disk before returning to - * the user. + * For whiteouts, we need to bump the link count on the whiteout inode. + * This means that failures all the way up to this point leave the inode + * on the unlinked list and so cleanup is a simple matter of dropping + * the remaining reference to it. If we fail here after bumping the link + * count, we're shutting down the filesystem so we'll never see the + * intermediate state on disk. */ - if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { - xfs_trans_set_sync(tp); - } + if (wip) { + ASSERT(wip->i_d.di_nlink == 0); + error = xfs_bumplink(tp, wip); + if (error) + goto out_trans_abort; + error = xfs_iunlink_remove(tp, wip); + if (error) + goto out_trans_abort; + xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE); - error = xfs_bmap_finish(&tp, &free_list, &committed); - if (error) { - xfs_bmap_cancel(&free_list); - xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | - XFS_TRANS_ABORT)); - goto std_return; + /* + * Now we have a real link, clear the "I'm a tmpfile" state + * flag from the inode so it doesn't accidentally get misused in + * future. + */ + VFS_I(wip)->i_state &= ~I_LINKABLE; } - /* - * trans_commit will unlock src_ip, target_ip & decrement - * the vnode references. - */ - return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); + xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); + if (new_parent) + xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); - abort_return: + error = xfs_finish_rename(tp, &free_list); + if (wip) + IRELE(wip); + return error; + +out_trans_abort: cancel_flags |= XFS_TRANS_ABORT; - error_return: +out_bmap_cancel: xfs_bmap_cancel(&free_list); +out_trans_cancel: xfs_trans_cancel(tp, cancel_flags); - std_return: + if (wip) + IRELE(wip); return error; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index a1cd55f..8f22d20 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -56,6 +56,7 @@ typedef struct xfs_inode { struct xfs_inode_log_item *i_itemp; /* logging information */ mrlock_t i_lock; /* inode lock */ mrlock_t i_iolock; /* inode IO lock */ + mrlock_t i_mmaplock; /* inode mmap IO lock */ atomic_t i_pincount; /* inode pin count */ spinlock_t i_flags_lock; /* inode i_flags lock */ /* Miscellaneous state. 
*/ @@ -263,15 +264,20 @@ static inline int xfs_isiflocked(struct xfs_inode *ip) #define XFS_IOLOCK_SHARED (1<<1) #define XFS_ILOCK_EXCL (1<<2) #define XFS_ILOCK_SHARED (1<<3) +#define XFS_MMAPLOCK_EXCL (1<<4) +#define XFS_MMAPLOCK_SHARED (1<<5) #define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \ - | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED) + | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \ + | XFS_MMAPLOCK_EXCL | XFS_MMAPLOCK_SHARED) #define XFS_LOCK_FLAGS \ { XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \ { XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \ { XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \ - { XFS_ILOCK_SHARED, "ILOCK_SHARED" } + { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \ + { XFS_MMAPLOCK_EXCL, "MMAPLOCK_EXCL" }, \ + { XFS_MMAPLOCK_SHARED, "MMAPLOCK_SHARED" } /* @@ -302,17 +308,26 @@ static inline int xfs_isiflocked(struct xfs_inode *ip) #define XFS_IOLOCK_SHIFT 16 #define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT) +#define XFS_MMAPLOCK_SHIFT 20 + #define XFS_ILOCK_SHIFT 24 #define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT) #define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT) #define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT) -#define XFS_IOLOCK_DEP_MASK 0x00ff0000 +#define XFS_IOLOCK_DEP_MASK 0x000f0000 +#define XFS_MMAPLOCK_DEP_MASK 0x00f00000 #define XFS_ILOCK_DEP_MASK 0xff000000 -#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | XFS_ILOCK_DEP_MASK) +#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | \ + XFS_MMAPLOCK_DEP_MASK | \ + XFS_ILOCK_DEP_MASK) -#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT) -#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT) +#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) \ + >> XFS_IOLOCK_SHIFT) +#define XFS_MMAPLOCK_DEP(flags) (((flags) & XFS_MMAPLOCK_DEP_MASK) \ + >> XFS_MMAPLOCK_SHIFT) +#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) \ + >> XFS_ILOCK_SHIFT) /* * For multiple groups support: if S_ISGID bit is set in the parent @@ -391,6 +406,28 @@ int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset, int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count); +/* from xfs_iops.c */ +/* + * When setting up a newly allocated inode, we need to call + * xfs_finish_inode_setup() once the inode is fully instantiated at + * the VFS level to prevent the rest of the world seeing the inode + * before we've completed instantiation. Otherwise we can do it + * the moment the inode lookup is complete. + */ +extern void xfs_setup_inode(struct xfs_inode *ip); +static inline void xfs_finish_inode_setup(struct xfs_inode *ip) +{ + xfs_iflags_clear(ip, XFS_INEW); + barrier(); + unlock_new_inode(VFS_I(ip)); +} + +static inline void xfs_setup_existing_inode(struct xfs_inode *ip) +{ + xfs_setup_inode(ip); + xfs_finish_inode_setup(ip); +} + #define IHOLD(ip) \ do { \ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index ac4feae..87f67c6 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -82,7 +82,7 @@ xfs_find_handle( error = user_lpath((const char __user *)hreq->path, &path); if (error) return error; - inode = path.dentry->d_inode; + inode = d_inode(path.dentry); } ip = XFS_I(inode); @@ -210,7 +210,7 @@ xfs_open_by_handle( dentry = xfs_handlereq_to_dentry(parfilp, hreq); if (IS_ERR(dentry)) return PTR_ERR(dentry); - inode = dentry->d_inode; + inode = d_inode(dentry); /* Restrict xfs_open_by_handle to directories & regular files. 
*/ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { @@ -303,7 +303,7 @@ xfs_readlink_by_handle( goto out_dput; } - error = xfs_readlink(XFS_I(dentry->d_inode), link); + error = xfs_readlink(XFS_I(d_inode(dentry)), link); if (error) goto out_kfree; error = readlink_copy(hreq->ohandle, olen, link); @@ -376,7 +376,7 @@ xfs_fssetdm_by_handle( return PTR_ERR(dentry); } - if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { + if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) { error = -EPERM; goto out; } @@ -386,7 +386,7 @@ xfs_fssetdm_by_handle( goto out; } - error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, + error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask, fsd.fsd_dmstate); out: @@ -429,7 +429,7 @@ xfs_attrlist_by_handle( goto out_dput; cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; - error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, + error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen, al_hreq.flags, cursor); if (error) goto out_kfree; @@ -559,7 +559,7 @@ xfs_attrmulti_by_handle( switch (ops[i].am_opcode) { case ATTR_OP_GET: ops[i].am_error = xfs_attrmulti_attr_get( - dentry->d_inode, attr_name, + d_inode(dentry), attr_name, ops[i].am_attrvalue, &ops[i].am_length, ops[i].am_flags); break; @@ -568,7 +568,7 @@ xfs_attrmulti_by_handle( if (ops[i].am_error) break; ops[i].am_error = xfs_attrmulti_attr_set( - dentry->d_inode, attr_name, + d_inode(dentry), attr_name, ops[i].am_attrvalue, ops[i].am_length, ops[i].am_flags); mnt_drop_write_file(parfilp); @@ -578,7 +578,7 @@ xfs_attrmulti_by_handle( if (ops[i].am_error) break; ops[i].am_error = xfs_attrmulti_attr_remove( - dentry->d_inode, attr_name, + d_inode(dentry), attr_name, ops[i].am_flags); mnt_drop_write_file(parfilp); break; @@ -631,7 +631,7 @@ xfs_ioc_space( if (filp->f_flags & O_DSYNC) flags |= XFS_PREALLOC_SYNC; - if (ioflags & XFS_IO_INVIS) + if (ioflags & XFS_IO_INVIS) flags |= XFS_PREALLOC_INVISIBLE; error = mnt_want_write_file(filp); @@ -639,10 +639,13 @@ xfs_ioc_space( return error; xfs_ilock(ip, iolock); - error = xfs_break_layouts(inode, &iolock); + error = xfs_break_layouts(inode, &iolock, false); if (error) goto out_unlock; + xfs_ilock(ip, XFS_MMAPLOCK_EXCL); + iolock |= XFS_MMAPLOCK_EXCL; + switch (bf->l_whence) { case 0: /*SEEK_SET*/ break; diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index bfc7c7c..b88bdc8 100644 --- a/fs/xfs/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c @@ -375,7 +375,7 @@ xfs_compat_attrlist_by_handle( goto out_dput; cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; - error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, + error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen, al_hreq.flags, cursor); if (error) goto out_kfree; @@ -445,7 +445,7 @@ xfs_compat_attrmulti_by_handle( switch (ops[i].am_opcode) { case ATTR_OP_GET: ops[i].am_error = xfs_attrmulti_attr_get( - dentry->d_inode, attr_name, + d_inode(dentry), attr_name, compat_ptr(ops[i].am_attrvalue), &ops[i].am_length, ops[i].am_flags); break; @@ -454,7 +454,7 @@ xfs_compat_attrmulti_by_handle( if (ops[i].am_error) break; ops[i].am_error = xfs_attrmulti_attr_set( - dentry->d_inode, attr_name, + d_inode(dentry), attr_name, compat_ptr(ops[i].am_attrvalue), ops[i].am_length, ops[i].am_flags); mnt_drop_write_file(parfilp); @@ -464,7 +464,7 @@ xfs_compat_attrmulti_by_handle( if (ops[i].am_error) break; ops[i].am_error = xfs_attrmulti_attr_remove( - dentry->d_inode, attr_name, + d_inode(dentry), attr_name, 
ops[i].am_flags); mnt_drop_write_file(parfilp); break; @@ -504,7 +504,7 @@ xfs_compat_fssetdm_by_handle( if (IS_ERR(dentry)) return PTR_ERR(dentry); - if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { + if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) { error = -EPERM; goto out; } @@ -514,7 +514,7 @@ xfs_compat_fssetdm_by_handle( goto out; } - error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, + error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask, fsd.fsd_dmstate); out: diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index ccb1dd0..38e633b 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -460,8 +460,7 @@ xfs_iomap_prealloc_size( alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN), alloc_blocks); - xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); - freesp = mp->m_sb.sb_fdblocks; + freesp = percpu_counter_read_positive(&mp->m_fdblocks); if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { shift = 2; if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index e53a903..f4cd720 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -187,6 +187,8 @@ xfs_generic_create( else d_instantiate(dentry, inode); + xfs_finish_inode_setup(ip); + out_free_acl: if (default_acl) posix_acl_release(default_acl); @@ -195,6 +197,7 @@ xfs_generic_create( return error; out_cleanup_inode: + xfs_finish_inode_setup(ip); if (!tmpfile) xfs_cleanup_inode(dir, inode, dentry); iput(inode); @@ -301,7 +304,7 @@ xfs_vn_link( struct inode *dir, struct dentry *dentry) { - struct inode *inode = old_dentry->d_inode; + struct inode *inode = d_inode(old_dentry); struct xfs_name name; int error; @@ -326,7 +329,7 @@ xfs_vn_unlink( xfs_dentry_to_name(&name, dentry, 0); - error = xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); + error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry))); if (error) return error; @@ -367,9 +370,11 @@ xfs_vn_symlink( goto out_cleanup_inode; d_instantiate(dentry, inode); + xfs_finish_inode_setup(cip); return 0; out_cleanup_inode: + xfs_finish_inode_setup(cip); xfs_cleanup_inode(dir, inode, dentry); iput(inode); out: @@ -384,22 +389,22 @@ xfs_vn_rename( struct dentry *ndentry, unsigned int flags) { - struct inode *new_inode = ndentry->d_inode; + struct inode *new_inode = d_inode(ndentry); int omode = 0; struct xfs_name oname; struct xfs_name nname; - if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; /* if we are exchanging files, we need to set i_mode of both files */ if (flags & RENAME_EXCHANGE) - omode = ndentry->d_inode->i_mode; + omode = d_inode(ndentry)->i_mode; xfs_dentry_to_name(&oname, odentry, omode); - xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode); + xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode); - return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), + return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)), XFS_I(ndir), &nname, new_inode ? 
XFS_I(new_inode) : NULL, flags); } @@ -421,7 +426,7 @@ xfs_vn_follow_link( if (!link) goto out_err; - error = xfs_readlink(XFS_I(dentry->d_inode), link); + error = xfs_readlink(XFS_I(d_inode(dentry)), link); if (unlikely(error)) goto out_kfree; @@ -441,7 +446,7 @@ xfs_vn_getattr( struct dentry *dentry, struct kstat *stat) { - struct inode *inode = dentry->d_inode; + struct inode *inode = d_inode(dentry); struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; @@ -766,6 +771,7 @@ xfs_setattr_size( return error; ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); + ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL)); ASSERT(S_ISREG(ip->i_d.di_mode)); ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); @@ -829,55 +835,27 @@ xfs_setattr_size( inode_dio_wait(inode); /* - * Do all the page cache truncate work outside the transaction context - * as the "lock" order is page lock->log space reservation. i.e. - * locking pages inside the transaction can ABBA deadlock with - * writeback. We have to do the VFS inode size update before we truncate - * the pagecache, however, to avoid racing with page faults beyond the - * new EOF they are not serialised against truncate operations except by - * page locks and size updates. + * We've already locked out new page faults, so now we can safely remove + * pages from the page cache knowing they won't get refaulted until we + * drop the XFS_MMAP_EXCL lock after the extent manipulations are + * complete. The truncate_setsize() call also cleans partial EOF page + * PTEs on extending truncates and hence ensures sub-page block size + * filesystems are correctly handled, too. * - * Hence we are in a situation where a truncate can fail with ENOMEM - * from xfs_trans_reserve(), but having already truncated the in-memory - * version of the file (i.e. made user visible changes). There's not - * much we can do about this, except to hope that the caller sees ENOMEM - * and retries the truncate operation. + * We have to do all the page cache truncate work outside the + * transaction context as the "lock" order is page lock->log space + * reservation as defined by extent allocation in the writeback path. + * Hence a truncate can fail with ENOMEM from xfs_trans_reserve(), but + * having already truncated the in-memory version of the file (i.e. made + * user visible changes). There's not much we can do about this, except + * to hope that the caller sees ENOMEM and retries the truncate + * operation. */ error = block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); if (error) return error; truncate_setsize(inode, newsize); - /* - * The "we can't serialise against page faults" pain gets worse. - * - * If the file is mapped then we have to clean the page at the old EOF - * when extending the file. Extending the file can expose changes the - * underlying page mapping (e.g. from beyond EOF to a hole or - * unwritten), and so on the next attempt to write to that page we need - * to remap it for write. i.e. we need .page_mkwrite() to be called. - * Hence we need to clean the page to clean the pte and so a new write - * fault will be triggered appropriately. - * - * If we do it before we change the inode size, then we can race with a - * page fault that maps the page with exactly the same problem. If we do - * it after we change the file size, then a new page fault can come in - * and allocate space before we've run the rest of the truncate - * transaction. 
That's kinda grotesque, but it's better than have data - * over a hole, and so that's the lesser evil that has been chosen here. - * - * The real solution, however, is to have some mechanism for locking out - * page faults while a truncate is in progress. - */ - if (newsize > oldsize && mapping_mapped(VFS_I(ip)->i_mapping)) { - error = filemap_write_and_wait_range( - VFS_I(ip)->i_mapping, - round_down(oldsize, PAGE_CACHE_SIZE), - round_up(oldsize, PAGE_CACHE_SIZE) - 1); - if (error) - return error; - } - tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0); if (error) @@ -968,16 +946,20 @@ xfs_vn_setattr( struct dentry *dentry, struct iattr *iattr) { - struct xfs_inode *ip = XFS_I(dentry->d_inode); + struct xfs_inode *ip = XFS_I(d_inode(dentry)); int error; if (iattr->ia_valid & ATTR_SIZE) { uint iolock = XFS_IOLOCK_EXCL; xfs_ilock(ip, iolock); - error = xfs_break_layouts(dentry->d_inode, &iolock); - if (!error) + error = xfs_break_layouts(d_inode(dentry), &iolock, true); + if (!error) { + xfs_ilock(ip, XFS_MMAPLOCK_EXCL); + iolock |= XFS_MMAPLOCK_EXCL; + error = xfs_setattr_size(ip, iattr); + } xfs_iunlock(ip, iolock); } else { error = xfs_setattr_nonsize(ip, iattr, 0); @@ -1228,16 +1210,12 @@ xfs_diflags_to_iflags( } /* - * Initialize the Linux inode, set up the operation vectors and - * unlock the inode. + * Initialize the Linux inode and set up the operation vectors. * - * When reading existing inodes from disk this is called directly - * from xfs_iget, when creating a new inode it is called from - * xfs_ialloc after setting up the inode. - * - * We are always called with an uninitialised linux inode here. - * We need to initialise the necessary fields and take a reference - * on it. + * When reading existing inodes from disk this is called directly from xfs_iget, + * when creating a new inode it is called from xfs_ialloc after setting up the + * inode. These callers have different criteria for clearing XFS_INEW, so leave + * it up to the caller to deal with unlocking the inode appropriately. */ void xfs_setup_inode( @@ -1324,9 +1302,4 @@ xfs_setup_inode( inode_has_no_xattr(inode); cache_no_acl(inode); } - - xfs_iflags_clear(ip, XFS_INEW); - barrier(); - - unlock_new_inode(inode); } diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h index ea7a98e..a0f84ab 100644 --- a/fs/xfs/xfs_iops.h +++ b/fs/xfs/xfs_iops.h @@ -25,8 +25,6 @@ extern const struct file_operations xfs_dir_file_operations; extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); -extern void xfs_setup_inode(struct xfs_inode *); - /* * Internal setattr interfaces. 
*/ diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 82e3142..8042989 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -229,7 +229,7 @@ xfs_bulkstat_grab_ichunk( error = xfs_inobt_get_rec(cur, irec, &stat); if (error) return error; - XFS_WANT_CORRUPTED_RETURN(stat == 1); + XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1); /* Check if the record contains the inode in request */ if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) { diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index c31d2c2..7c7842c 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -116,15 +116,6 @@ typedef __uint64_t __psunsigned_t; #undef XFS_NATIVE_HOST #endif -/* - * Feature macros (disable/enable) - */ -#ifdef CONFIG_SMP -#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ -#else -#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ -#endif - #define irix_sgid_inherit xfs_params.sgid_inherit.val #define irix_symlink_mode xfs_params.symlink_mode.val #define xfs_panic_mask xfs_params.panic_mask.val diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index a5a945f..4f5784f 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -4463,10 +4463,10 @@ xlog_do_recover( xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); ASSERT(xfs_sb_good_version(sbp)); + xfs_reinit_percpu_counters(log->l_mp); + xfs_buf_relse(bp); - /* We've re-read the superblock so re-initialize per-cpu counters */ - xfs_icsb_reinit_counters(log->l_mp); xlog_recover_check_summary(log); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 4fa80e6..2ce7ee3 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -43,18 +43,6 @@ #include "xfs_sysfs.h" -#ifdef HAVE_PERCPU_SB -STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, - int); -STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, - int); -STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); -#else - -#define xfs_icsb_balance_counter(mp, a, b) do { } while (0) -#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) -#endif - static DEFINE_MUTEX(xfs_uuid_table_mutex); static int xfs_uuid_table_size; static uuid_t *xfs_uuid_table; @@ -347,8 +335,7 @@ reread: goto reread; } - /* Initialize per-cpu counters */ - xfs_icsb_reinit_counters(mp); + xfs_reinit_percpu_counters(mp); /* no need to be quiet anymore, so reset the buf ops */ bp->b_ops = &xfs_sb_buf_ops; @@ -1087,8 +1074,6 @@ xfs_log_sbcount(xfs_mount_t *mp) if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE)) return 0; - xfs_icsb_sync_counters(mp, 0); - /* * we don't need to do this if we are updating the superblock * counters on every modification. @@ -1099,253 +1084,136 @@ xfs_log_sbcount(xfs_mount_t *mp) return xfs_sync_sb(mp, true); } -/* - * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply - * a delta to a specified field in the in-core superblock. Simply - * switch on the field indicated and apply the delta to that field. - * Fields are not allowed to dip below zero, so if the delta would - * do this do not apply it and return EINVAL. - * - * The m_sb_lock must be held when this routine is called. 
- */ -STATIC int -xfs_mod_incore_sb_unlocked( - xfs_mount_t *mp, - xfs_sb_field_t field, - int64_t delta, - int rsvd) +int +xfs_mod_icount( + struct xfs_mount *mp, + int64_t delta) { - int scounter; /* short counter for 32 bit fields */ - long long lcounter; /* long counter for 64 bit fields */ - long long res_used, rem; - - /* - * With the in-core superblock spin lock held, switch - * on the indicated field. Apply the delta to the - * proper field. If the fields value would dip below - * 0, then do not apply the delta and return EINVAL. - */ - switch (field) { - case XFS_SBS_ICOUNT: - lcounter = (long long)mp->m_sb.sb_icount; - lcounter += delta; - if (lcounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_icount = lcounter; - return 0; - case XFS_SBS_IFREE: - lcounter = (long long)mp->m_sb.sb_ifree; - lcounter += delta; - if (lcounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_ifree = lcounter; - return 0; - case XFS_SBS_FDBLOCKS: - lcounter = (long long) - mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); - res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); - - if (delta > 0) { /* Putting blocks back */ - if (res_used > delta) { - mp->m_resblks_avail += delta; - } else { - rem = delta - res_used; - mp->m_resblks_avail = mp->m_resblks; - lcounter += rem; - } - } else { /* Taking blocks away */ - lcounter += delta; - if (lcounter >= 0) { - mp->m_sb.sb_fdblocks = lcounter + - XFS_ALLOC_SET_ASIDE(mp); - return 0; - } - - /* - * We are out of blocks, use any available reserved - * blocks if were allowed to. - */ - if (!rsvd) - return -ENOSPC; - - lcounter = (long long)mp->m_resblks_avail + delta; - if (lcounter >= 0) { - mp->m_resblks_avail = lcounter; - return 0; - } - printk_once(KERN_WARNING - "Filesystem \"%s\": reserve blocks depleted! 
" - "Consider increasing reserve pool size.", - mp->m_fsname); - return -ENOSPC; - } - - mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); - return 0; - case XFS_SBS_FREXTENTS: - lcounter = (long long)mp->m_sb.sb_frextents; - lcounter += delta; - if (lcounter < 0) { - return -ENOSPC; - } - mp->m_sb.sb_frextents = lcounter; - return 0; - case XFS_SBS_DBLOCKS: - lcounter = (long long)mp->m_sb.sb_dblocks; - lcounter += delta; - if (lcounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_dblocks = lcounter; - return 0; - case XFS_SBS_AGCOUNT: - scounter = mp->m_sb.sb_agcount; - scounter += delta; - if (scounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_agcount = scounter; - return 0; - case XFS_SBS_IMAX_PCT: - scounter = mp->m_sb.sb_imax_pct; - scounter += delta; - if (scounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_imax_pct = scounter; - return 0; - case XFS_SBS_REXTSIZE: - scounter = mp->m_sb.sb_rextsize; - scounter += delta; - if (scounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_rextsize = scounter; - return 0; - case XFS_SBS_RBMBLOCKS: - scounter = mp->m_sb.sb_rbmblocks; - scounter += delta; - if (scounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_rbmblocks = scounter; - return 0; - case XFS_SBS_RBLOCKS: - lcounter = (long long)mp->m_sb.sb_rblocks; - lcounter += delta; - if (lcounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_rblocks = lcounter; - return 0; - case XFS_SBS_REXTENTS: - lcounter = (long long)mp->m_sb.sb_rextents; - lcounter += delta; - if (lcounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_rextents = lcounter; - return 0; - case XFS_SBS_REXTSLOG: - scounter = mp->m_sb.sb_rextslog; - scounter += delta; - if (scounter < 0) { - ASSERT(0); - return -EINVAL; - } - mp->m_sb.sb_rextslog = scounter; - return 0; - default: + /* deltas are +/-64, hence the large batch size of 128. */ + __percpu_counter_add(&mp->m_icount, delta, 128); + if (percpu_counter_compare(&mp->m_icount, 0) < 0) { ASSERT(0); + percpu_counter_add(&mp->m_icount, -delta); return -EINVAL; } + return 0; } -/* - * xfs_mod_incore_sb() is used to change a field in the in-core - * superblock structure by the specified delta. This modification - * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked() - * routine to do the work. - */ int -xfs_mod_incore_sb( +xfs_mod_ifree( struct xfs_mount *mp, - xfs_sb_field_t field, - int64_t delta, - int rsvd) + int64_t delta) { - int status; - -#ifdef HAVE_PERCPU_SB - ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS); -#endif - spin_lock(&mp->m_sb_lock); - status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); - spin_unlock(&mp->m_sb_lock); - - return status; + percpu_counter_add(&mp->m_ifree, delta); + if (percpu_counter_compare(&mp->m_ifree, 0) < 0) { + ASSERT(0); + percpu_counter_add(&mp->m_ifree, -delta); + return -EINVAL; + } + return 0; } -/* - * Change more than one field in the in-core superblock structure at a time. - * - * The fields and changes to those fields are specified in the array of - * xfs_mod_sb structures passed in. Either all of the specified deltas - * will be applied or none of them will. If any modified field dips below 0, - * then all modifications will be backed out and EINVAL will be returned. - * - * Note that this function may not be used for the superblock values that - * are tracked with the in-memory per-cpu counters - a direct call to - * xfs_icsb_modify_counters is required for these. 
- */ int -xfs_mod_incore_sb_batch( +xfs_mod_fdblocks( struct xfs_mount *mp, - xfs_mod_sb_t *msb, - uint nmsb, - int rsvd) + int64_t delta, + bool rsvd) { - xfs_mod_sb_t *msbp; - int error = 0; + int64_t lcounter; + long long res_used; + s32 batch; + + if (delta > 0) { + /* + * If the reserve pool is depleted, put blocks back into it + * first. Most of the time the pool is full. + */ + if (likely(mp->m_resblks == mp->m_resblks_avail)) { + percpu_counter_add(&mp->m_fdblocks, delta); + return 0; + } + + spin_lock(&mp->m_sb_lock); + res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); + + if (res_used > delta) { + mp->m_resblks_avail += delta; + } else { + delta -= res_used; + mp->m_resblks_avail = mp->m_resblks; + percpu_counter_add(&mp->m_fdblocks, delta); + } + spin_unlock(&mp->m_sb_lock); + return 0; + } /* - * Loop through the array of mod structures and apply each individually. - * If any fail, then back out all those which have already been applied. - * Do all of this within the scope of the m_sb_lock so that all of the - * changes will be atomic. + * Taking blocks away, need to be more accurate the closer we + * are to zero. + * + * batch size is set to a maximum of 1024 blocks - if we are + * allocating of freeing extents larger than this then we aren't + * going to be hammering the counter lock so a lock per update + * is not a problem. + * + * If the counter has a value of less than 2 * max batch size, + * then make everything serialise as we are real close to + * ENOSPC. + */ +#define __BATCH 1024 + if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0) + batch = 1; + else + batch = __BATCH; +#undef __BATCH + + __percpu_counter_add(&mp->m_fdblocks, delta, batch); + if (percpu_counter_compare(&mp->m_fdblocks, + XFS_ALLOC_SET_ASIDE(mp)) >= 0) { + /* we had space! */ + return 0; + } + + /* + * lock up the sb for dipping into reserves before releasing the space + * that took us to ENOSPC. */ spin_lock(&mp->m_sb_lock); - for (msbp = msb; msbp < (msb + nmsb); msbp++) { - ASSERT(msbp->msb_field < XFS_SBS_ICOUNT || - msbp->msb_field > XFS_SBS_FDBLOCKS); + percpu_counter_add(&mp->m_fdblocks, -delta); + if (!rsvd) + goto fdblocks_enospc; - error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, - msbp->msb_delta, rsvd); - if (error) - goto unwind; + lcounter = (long long)mp->m_resblks_avail + delta; + if (lcounter >= 0) { + mp->m_resblks_avail = lcounter; + spin_unlock(&mp->m_sb_lock); + return 0; } + printk_once(KERN_WARNING + "Filesystem \"%s\": reserve blocks depleted! " + "Consider increasing reserve pool size.", + mp->m_fsname); +fdblocks_enospc: spin_unlock(&mp->m_sb_lock); - return 0; + return -ENOSPC; +} -unwind: - while (--msbp >= msb) { - error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, - -msbp->msb_delta, rsvd); - ASSERT(error == 0); - } +int +xfs_mod_frextents( + struct xfs_mount *mp, + int64_t delta) +{ + int64_t lcounter; + int ret = 0; + + spin_lock(&mp->m_sb_lock); + lcounter = mp->m_sb.sb_frextents + delta; + if (lcounter < 0) + ret = -ENOSPC; + else + mp->m_sb.sb_frextents = lcounter; spin_unlock(&mp->m_sb_lock); - return error; + return ret; } /* @@ -1407,573 +1275,3 @@ xfs_dev_is_read_only( } return 0; } - -#ifdef HAVE_PERCPU_SB -/* - * Per-cpu incore superblock counters - * - * Simple concept, difficult implementation - * - * Basically, replace the incore superblock counters with a distributed per cpu - * counter for contended fields (e.g. free block count). 
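Editor's note: xfs_mod_fdblocks() above is the only one of the three counters that must be accurate at ENOSPC, hence the variable batch. The heuristic reads as: once the global value might be within 2 * batch of the floor, force every CPU's update through the shared counter (batch of 1) so the comparison against XFS_ALLOC_SET_ASIDE() stays exact; otherwise allow per-cpu batching. A hedged sketch of just that selection, using an illustrative helper name:

#define FDBLOCKS_BATCH	1024	/* matches the __BATCH used above */

/* Illustrative helper: pick a per-cpu batch that keeps ENOSPC checks exact. */
static s32 fdblocks_batch(struct xfs_mount *mp)
{
	/*
	 * Within 2 * FDBLOCKS_BATCH of zero the accumulated per-cpu error
	 * could hide an underflow, so serialise every modification there;
	 * far from zero, batch up to FDBLOCKS_BATCH blocks per cpu.
	 */
	if (percpu_counter_compare(&mp->m_fdblocks, 2 * FDBLOCKS_BATCH) < 0)
		return 1;
	return FDBLOCKS_BATCH;
}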
- * - * Difficulties arise in that the incore sb is used for ENOSPC checking, and - * hence needs to be accurately read when we are running low on space. Hence - * there is a method to enable and disable the per-cpu counters based on how - * much "stuff" is available in them. - * - * Basically, a counter is enabled if there is enough free resource to justify - * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local - * ENOSPC), then we disable the counters to synchronise all callers and - * re-distribute the available resources. - * - * If, once we redistributed the available resources, we still get a failure, - * we disable the per-cpu counter and go through the slow path. - * - * The slow path is the current xfs_mod_incore_sb() function. This means that - * when we disable a per-cpu counter, we need to drain its resources back to - * the global superblock. We do this after disabling the counter to prevent - * more threads from queueing up on the counter. - * - * Essentially, this means that we still need a lock in the fast path to enable - * synchronisation between the global counters and the per-cpu counters. This - * is not a problem because the lock will be local to a CPU almost all the time - * and have little contention except when we get to ENOSPC conditions. - * - * Basically, this lock becomes a barrier that enables us to lock out the fast - * path while we do things like enabling and disabling counters and - * synchronising the counters. - * - * Locking rules: - * - * 1. m_sb_lock before picking up per-cpu locks - * 2. per-cpu locks always picked up via for_each_online_cpu() order - * 3. accurate counter sync requires m_sb_lock + per cpu locks - * 4. modifying per-cpu counters requires holding per-cpu lock - * 5. modifying global counters requires holding m_sb_lock - * 6. enabling or disabling a counter requires holding the m_sb_lock - * and _none_ of the per-cpu locks. - * - * Disabled counters are only ever re-enabled by a balance operation - * that results in more free resources per CPU than a given threshold. - * To ensure counters don't remain disabled, they are rebalanced when - * the global resource goes above a higher threshold (i.e. some hysteresis - * is present to prevent thrashing). - */ - -#ifdef CONFIG_HOTPLUG_CPU -/* - * hot-plug CPU notifier support. - * - * We need a notifier per filesystem as we need to be able to identify - * the filesystem to balance the counters out. This is achieved by - * having a notifier block embedded in the xfs_mount_t and doing pointer - * magic to get the mount pointer from the notifier block address. - */ -STATIC int -xfs_icsb_cpu_notify( - struct notifier_block *nfb, - unsigned long action, - void *hcpu) -{ - xfs_icsb_cnts_t *cntp; - xfs_mount_t *mp; - - mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); - cntp = (xfs_icsb_cnts_t *) - per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - /* Easy Case - initialize the area and locks, and - * then rebalance when online does everything else for us. 
*/ - memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); - break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - xfs_icsb_lock(mp); - xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); - xfs_icsb_unlock(mp); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - /* Disable all the counters, then fold the dead cpu's - * count into the total on the global superblock and - * re-enable the counters. */ - xfs_icsb_lock(mp); - spin_lock(&mp->m_sb_lock); - xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); - xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); - xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); - - mp->m_sb.sb_icount += cntp->icsb_icount; - mp->m_sb.sb_ifree += cntp->icsb_ifree; - mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; - - memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); - - xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0); - xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0); - xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0); - spin_unlock(&mp->m_sb_lock); - xfs_icsb_unlock(mp); - break; - } - - return NOTIFY_OK; -} -#endif /* CONFIG_HOTPLUG_CPU */ - -int -xfs_icsb_init_counters( - xfs_mount_t *mp) -{ - xfs_icsb_cnts_t *cntp; - int i; - - mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); - if (mp->m_sb_cnts == NULL) - return -ENOMEM; - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); - } - - mutex_init(&mp->m_icsb_mutex); - - /* - * start with all counters disabled so that the - * initial balance kicks us off correctly - */ - mp->m_icsb_counters = -1; - -#ifdef CONFIG_HOTPLUG_CPU - mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; - mp->m_icsb_notifier.priority = 0; - register_hotcpu_notifier(&mp->m_icsb_notifier); -#endif /* CONFIG_HOTPLUG_CPU */ - - return 0; -} - -void -xfs_icsb_reinit_counters( - xfs_mount_t *mp) -{ - xfs_icsb_lock(mp); - /* - * start with all counters disabled so that the - * initial balance kicks us off correctly - */ - mp->m_icsb_counters = -1; - xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); - xfs_icsb_unlock(mp); -} - -void -xfs_icsb_destroy_counters( - xfs_mount_t *mp) -{ - if (mp->m_sb_cnts) { - unregister_hotcpu_notifier(&mp->m_icsb_notifier); - free_percpu(mp->m_sb_cnts); - } - mutex_destroy(&mp->m_icsb_mutex); -} - -STATIC void -xfs_icsb_lock_cntr( - xfs_icsb_cnts_t *icsbp) -{ - while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) { - ndelay(1000); - } -} - -STATIC void -xfs_icsb_unlock_cntr( - xfs_icsb_cnts_t *icsbp) -{ - clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags); -} - - -STATIC void -xfs_icsb_lock_all_counters( - xfs_mount_t *mp) -{ - xfs_icsb_cnts_t *cntp; - int i; - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - xfs_icsb_lock_cntr(cntp); - } -} - -STATIC void -xfs_icsb_unlock_all_counters( - xfs_mount_t *mp) -{ - xfs_icsb_cnts_t *cntp; - int i; - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - xfs_icsb_unlock_cntr(cntp); - } -} - -STATIC void -xfs_icsb_count( - xfs_mount_t *mp, - xfs_icsb_cnts_t *cnt, - int flags) -{ - xfs_icsb_cnts_t *cntp; - int i; - - memset(cnt, 0, sizeof(xfs_icsb_cnts_t)); - - if (!(flags & XFS_ICSB_LAZY_COUNT)) - xfs_icsb_lock_all_counters(mp); - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - 
cnt->icsb_icount += cntp->icsb_icount; - cnt->icsb_ifree += cntp->icsb_ifree; - cnt->icsb_fdblocks += cntp->icsb_fdblocks; - } - - if (!(flags & XFS_ICSB_LAZY_COUNT)) - xfs_icsb_unlock_all_counters(mp); -} - -STATIC int -xfs_icsb_counter_disabled( - xfs_mount_t *mp, - xfs_sb_field_t field) -{ - ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); - return test_bit(field, &mp->m_icsb_counters); -} - -STATIC void -xfs_icsb_disable_counter( - xfs_mount_t *mp, - xfs_sb_field_t field) -{ - xfs_icsb_cnts_t cnt; - - ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); - - /* - * If we are already disabled, then there is nothing to do - * here. We check before locking all the counters to avoid - * the expensive lock operation when being called in the - * slow path and the counter is already disabled. This is - * safe because the only time we set or clear this state is under - * the m_icsb_mutex. - */ - if (xfs_icsb_counter_disabled(mp, field)) - return; - - xfs_icsb_lock_all_counters(mp); - if (!test_and_set_bit(field, &mp->m_icsb_counters)) { - /* drain back to superblock */ - - xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT); - switch(field) { - case XFS_SBS_ICOUNT: - mp->m_sb.sb_icount = cnt.icsb_icount; - break; - case XFS_SBS_IFREE: - mp->m_sb.sb_ifree = cnt.icsb_ifree; - break; - case XFS_SBS_FDBLOCKS: - mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; - break; - default: - BUG(); - } - } - - xfs_icsb_unlock_all_counters(mp); -} - -STATIC void -xfs_icsb_enable_counter( - xfs_mount_t *mp, - xfs_sb_field_t field, - uint64_t count, - uint64_t resid) -{ - xfs_icsb_cnts_t *cntp; - int i; - - ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); - - xfs_icsb_lock_all_counters(mp); - for_each_online_cpu(i) { - cntp = per_cpu_ptr(mp->m_sb_cnts, i); - switch (field) { - case XFS_SBS_ICOUNT: - cntp->icsb_icount = count + resid; - break; - case XFS_SBS_IFREE: - cntp->icsb_ifree = count + resid; - break; - case XFS_SBS_FDBLOCKS: - cntp->icsb_fdblocks = count + resid; - break; - default: - BUG(); - break; - } - resid = 0; - } - clear_bit(field, &mp->m_icsb_counters); - xfs_icsb_unlock_all_counters(mp); -} - -void -xfs_icsb_sync_counters_locked( - xfs_mount_t *mp, - int flags) -{ - xfs_icsb_cnts_t cnt; - - xfs_icsb_count(mp, &cnt, flags); - - if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) - mp->m_sb.sb_icount = cnt.icsb_icount; - if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) - mp->m_sb.sb_ifree = cnt.icsb_ifree; - if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) - mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; -} - -/* - * Accurate update of per-cpu counters to incore superblock - */ -void -xfs_icsb_sync_counters( - xfs_mount_t *mp, - int flags) -{ - spin_lock(&mp->m_sb_lock); - xfs_icsb_sync_counters_locked(mp, flags); - spin_unlock(&mp->m_sb_lock); -} - -/* - * Balance and enable/disable counters as necessary. - * - * Thresholds for re-enabling counters are somewhat magic. inode counts are - * chosen to be the same number as single on disk allocation chunk per CPU, and - * free blocks is something far enough zero that we aren't going thrash when we - * get near ENOSPC. We also need to supply a minimum we require per cpu to - * prevent looping endlessly when xfs_alloc_space asks for more than will - * be distributed to a single CPU but each CPU has enough blocks to be - * reenabled. - * - * Note that we can be called when counters are already disabled. 
- * xfs_icsb_disable_counter() optimises the counter locking in this case to - * prevent locking every per-cpu counter needlessly. - */ - -#define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64 -#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \ - (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp)) -STATIC void -xfs_icsb_balance_counter_locked( - xfs_mount_t *mp, - xfs_sb_field_t field, - int min_per_cpu) -{ - uint64_t count, resid; - int weight = num_online_cpus(); - uint64_t min = (uint64_t)min_per_cpu; - - /* disable counter and sync counter */ - xfs_icsb_disable_counter(mp, field); - - /* update counters - first CPU gets residual*/ - switch (field) { - case XFS_SBS_ICOUNT: - count = mp->m_sb.sb_icount; - resid = do_div(count, weight); - if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) - return; - break; - case XFS_SBS_IFREE: - count = mp->m_sb.sb_ifree; - resid = do_div(count, weight); - if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) - return; - break; - case XFS_SBS_FDBLOCKS: - count = mp->m_sb.sb_fdblocks; - resid = do_div(count, weight); - if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp))) - return; - break; - default: - BUG(); - count = resid = 0; /* quiet, gcc */ - break; - } - - xfs_icsb_enable_counter(mp, field, count, resid); -} - -STATIC void -xfs_icsb_balance_counter( - xfs_mount_t *mp, - xfs_sb_field_t fields, - int min_per_cpu) -{ - spin_lock(&mp->m_sb_lock); - xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu); - spin_unlock(&mp->m_sb_lock); -} - -int -xfs_icsb_modify_counters( - xfs_mount_t *mp, - xfs_sb_field_t field, - int64_t delta, - int rsvd) -{ - xfs_icsb_cnts_t *icsbp; - long long lcounter; /* long counter for 64 bit fields */ - int ret = 0; - - might_sleep(); -again: - preempt_disable(); - icsbp = this_cpu_ptr(mp->m_sb_cnts); - - /* - * if the counter is disabled, go to slow path - */ - if (unlikely(xfs_icsb_counter_disabled(mp, field))) - goto slow_path; - xfs_icsb_lock_cntr(icsbp); - if (unlikely(xfs_icsb_counter_disabled(mp, field))) { - xfs_icsb_unlock_cntr(icsbp); - goto slow_path; - } - - switch (field) { - case XFS_SBS_ICOUNT: - lcounter = icsbp->icsb_icount; - lcounter += delta; - if (unlikely(lcounter < 0)) - goto balance_counter; - icsbp->icsb_icount = lcounter; - break; - - case XFS_SBS_IFREE: - lcounter = icsbp->icsb_ifree; - lcounter += delta; - if (unlikely(lcounter < 0)) - goto balance_counter; - icsbp->icsb_ifree = lcounter; - break; - - case XFS_SBS_FDBLOCKS: - BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0); - - lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); - lcounter += delta; - if (unlikely(lcounter < 0)) - goto balance_counter; - icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); - break; - default: - BUG(); - break; - } - xfs_icsb_unlock_cntr(icsbp); - preempt_enable(); - return 0; - -slow_path: - preempt_enable(); - - /* - * serialise with a mutex so we don't burn lots of cpu on - * the superblock lock. We still need to hold the superblock - * lock, however, when we modify the global structures. - */ - xfs_icsb_lock(mp); - - /* - * Now running atomically. - * - * If the counter is enabled, someone has beaten us to rebalancing. - * Drop the lock and try again in the fast path.... - */ - if (!(xfs_icsb_counter_disabled(mp, field))) { - xfs_icsb_unlock(mp); - goto again; - } - - /* - * The counter is currently disabled. Because we are - * running atomically here, we know a rebalance cannot - * be in progress. Hence we can go straight to operating - * on the global superblock. 
We do not call xfs_mod_incore_sb() - * here even though we need to get the m_sb_lock. Doing so - * will cause us to re-enter this function and deadlock. - * Hence we get the m_sb_lock ourselves and then call - * xfs_mod_incore_sb_unlocked() as the unlocked path operates - * directly on the global counters. - */ - spin_lock(&mp->m_sb_lock); - ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); - spin_unlock(&mp->m_sb_lock); - - /* - * Now that we've modified the global superblock, we - * may be able to re-enable the distributed counters - * (e.g. lots of space just got freed). After that - * we are done. - */ - if (ret != -ENOSPC) - xfs_icsb_balance_counter(mp, field, 0); - xfs_icsb_unlock(mp); - return ret; - -balance_counter: - xfs_icsb_unlock_cntr(icsbp); - preempt_enable(); - - /* - * We may have multiple threads here if multiple per-cpu - * counters run dry at the same time. This will mean we can - * do more balances than strictly necessary but it is not - * the common slowpath case. - */ - xfs_icsb_lock(mp); - - /* - * running atomically. - * - * This will leave the counter in the correct state for future - * accesses. After the rebalance, we simply try again and our retry - * will either succeed through the fast path or slow path without - * another balance operation being required. - */ - xfs_icsb_balance_counter(mp, field, delta); - xfs_icsb_unlock(mp); - goto again; -} - -#endif diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 0d8abd6..8c995a2 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -18,8 +18,6 @@ #ifndef __XFS_MOUNT_H__ #define __XFS_MOUNT_H__ -#ifdef __KERNEL__ - struct xlog; struct xfs_inode; struct xfs_mru_cache; @@ -29,44 +27,6 @@ struct xfs_quotainfo; struct xfs_dir_ops; struct xfs_da_geometry; -#ifdef HAVE_PERCPU_SB - -/* - * Valid per-cpu incore superblock counters. Note that if you add new counters, - * you may need to define new counter disabled bit field descriptors as there - * are more possible fields in the superblock that can fit in a bitfield on a - * 32 bit platform. The XFS_SBS_* values for the current current counters just - * fit. 
- */ -typedef struct xfs_icsb_cnts { - uint64_t icsb_fdblocks; - uint64_t icsb_ifree; - uint64_t icsb_icount; - unsigned long icsb_flags; -} xfs_icsb_cnts_t; - -#define XFS_ICSB_FLAG_LOCK (1 << 0) /* counter lock bit */ - -#define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */ - -extern int xfs_icsb_init_counters(struct xfs_mount *); -extern void xfs_icsb_reinit_counters(struct xfs_mount *); -extern void xfs_icsb_destroy_counters(struct xfs_mount *); -extern void xfs_icsb_sync_counters(struct xfs_mount *, int); -extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); -extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t, - int64_t, int); - -#else -#define xfs_icsb_init_counters(mp) (0) -#define xfs_icsb_destroy_counters(mp) do { } while (0) -#define xfs_icsb_reinit_counters(mp) do { } while (0) -#define xfs_icsb_sync_counters(mp, flags) do { } while (0) -#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0) -#define xfs_icsb_modify_counters(mp, field, delta, rsvd) \ - xfs_mod_incore_sb(mp, field, delta, rsvd) -#endif - /* dynamic preallocation free space thresholds, 5% down to 1% */ enum { XFS_LOWSP_1_PCNT = 0, @@ -81,8 +41,13 @@ typedef struct xfs_mount { struct super_block *m_super; xfs_tid_t m_tid; /* next unused tid for fs */ struct xfs_ail *m_ail; /* fs active log item list */ - xfs_sb_t m_sb; /* copy of fs superblock */ + + struct xfs_sb m_sb; /* copy of fs superblock */ spinlock_t m_sb_lock; /* sb counter lock */ + struct percpu_counter m_icount; /* allocated inodes counter */ + struct percpu_counter m_ifree; /* free inodes counter */ + struct percpu_counter m_fdblocks; /* free block counter */ + struct xfs_buf *m_sb_bp; /* buffer for superblock */ char *m_fsname; /* filesystem name */ int m_fsname_len; /* strlen of fs name */ @@ -152,12 +117,6 @@ typedef struct xfs_mount { const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */ uint m_chsize; /* size of next field */ atomic_t m_active_trans; /* number trans frozen */ -#ifdef HAVE_PERCPU_SB - xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */ - unsigned long m_icsb_counters; /* disabled per-cpu counters */ - struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */ - struct mutex m_icsb_mutex; /* balancer sync lock */ -#endif struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ struct delayed_work m_reclaim_work; /* background inode reclaim */ struct delayed_work m_eofblocks_work; /* background eof blocks @@ -301,35 +260,6 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) } /* - * Per-cpu superblock locking functions - */ -#ifdef HAVE_PERCPU_SB -static inline void -xfs_icsb_lock(xfs_mount_t *mp) -{ - mutex_lock(&mp->m_icsb_mutex); -} - -static inline void -xfs_icsb_unlock(xfs_mount_t *mp) -{ - mutex_unlock(&mp->m_icsb_mutex); -} -#else -#define xfs_icsb_lock(mp) -#define xfs_icsb_unlock(mp) -#endif - -/* - * This structure is for use by the xfs_mod_incore_sb_batch() routine. - * xfs_growfs can specify a few fields which are more than int limit - */ -typedef struct xfs_mod_sb { - xfs_sb_field_t msb_field; /* Field to modify, see below */ - int64_t msb_delta; /* Change to make to specified field */ -} xfs_mod_sb_t; - -/* * Per-ag incore structure, copies of information in agf and agi, to improve the * performance of allocation group selection. 
*/ @@ -383,11 +313,14 @@ extern __uint64_t xfs_default_resblks(xfs_mount_t *mp); extern int xfs_mountfs(xfs_mount_t *mp); extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount, xfs_agnumber_t *maxagi); - extern void xfs_unmountfs(xfs_mount_t *); -extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); -extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, - uint, int); + +extern int xfs_mod_icount(struct xfs_mount *mp, int64_t delta); +extern int xfs_mod_ifree(struct xfs_mount *mp, int64_t delta); +extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, + bool reserved); +extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta); + extern int xfs_mount_log_sb(xfs_mount_t *); extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); extern int xfs_readsb(xfs_mount_t *, int); @@ -399,6 +332,4 @@ extern int xfs_dev_is_read_only(struct xfs_mount *, char *); extern void xfs_set_low_space_thresholds(struct xfs_mount *); -#endif /* __KERNEL__ */ - #endif /* __XFS_MOUNT_H__ */ diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c index 30ecca3..f8a674d 100644 --- a/fs/xfs/xfs_mru_cache.c +++ b/fs/xfs/xfs_mru_cache.c @@ -437,7 +437,7 @@ xfs_mru_cache_insert( if (!mru || !mru->lists) return -EINVAL; - if (radix_tree_preload(GFP_KERNEL)) + if (radix_tree_preload(GFP_NOFS)) return -ENOMEM; INIT_LIST_HEAD(&elem->list_node); diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 365dd57..981a657 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -31,7 +31,8 @@ int xfs_break_layouts( struct inode *inode, - uint *iolock) + uint *iolock, + bool with_imutex) { struct xfs_inode *ip = XFS_I(inode); int error; @@ -40,8 +41,12 @@ xfs_break_layouts( while ((error = break_layout(inode, false) == -EWOULDBLOCK)) { xfs_iunlock(ip, *iolock); + if (with_imutex && (*iolock & XFS_IOLOCK_EXCL)) + mutex_unlock(&inode->i_mutex); error = break_layout(inode, true); *iolock = XFS_IOLOCK_EXCL; + if (with_imutex) + mutex_lock(&inode->i_mutex); xfs_ilock(ip, *iolock); } diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h index b7fbfce..8147ac1 100644 --- a/fs/xfs/xfs_pnfs.h +++ b/fs/xfs/xfs_pnfs.h @@ -8,9 +8,10 @@ int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length, int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps, struct iattr *iattr); -int xfs_break_layouts(struct inode *inode, uint *iolock); +int xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex); #else -static inline int xfs_break_layouts(struct inode *inode, uint *iolock) +static inline int +xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex) { return 0; } diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index fbbb9e6..5538468 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -719,6 +719,7 @@ xfs_qm_qino_alloc( xfs_trans_t *tp; int error; int committed; + bool need_alloc = true; *ip = NULL; /* @@ -747,6 +748,7 @@ xfs_qm_qino_alloc( return error; mp->m_sb.sb_gquotino = NULLFSINO; mp->m_sb.sb_pquotino = NULLFSINO; + need_alloc = false; } } @@ -758,7 +760,7 @@ xfs_qm_qino_alloc( return error; } - if (!*ip) { + if (need_alloc) { error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); if (error) { @@ -794,11 +796,14 @@ xfs_qm_qino_alloc( spin_unlock(&mp->m_sb_lock); xfs_log_sb(tp); - if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); xfs_alert(mp, "%s failed (error %d)!", __func__, 
error); - return error; } - return 0; + if (need_alloc) + xfs_finish_inode_setup(*ip); + return error; } diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 0d4d359..996a040 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -168,10 +168,6 @@ extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, uint, struct qc_dqblk *); extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, struct qc_dqblk *); -extern int xfs_qm_scall_getqstat(struct xfs_mount *, - struct fs_quota_stat *); -extern int xfs_qm_scall_getqstatv(struct xfs_mount *, - struct fs_quota_statv *); extern int xfs_qm_scall_quotaon(struct xfs_mount *, uint); extern int xfs_qm_scall_quotaoff(struct xfs_mount *, uint); diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 9b965db4..9a25c92 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -38,7 +38,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, uint); -STATIC uint xfs_qm_export_flags(uint); /* * Turn off quota accounting and/or enforcement for all udquots and/or @@ -389,159 +388,6 @@ xfs_qm_scall_quotaon( return 0; } - -/* - * Return quota status information, such as uquota-off, enforcements, etc. - * for Q_XGETQSTAT command. - */ -int -xfs_qm_scall_getqstat( - struct xfs_mount *mp, - struct fs_quota_stat *out) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_inode *uip = NULL; - struct xfs_inode *gip = NULL; - struct xfs_inode *pip = NULL; - bool tempuqip = false; - bool tempgqip = false; - bool temppqip = false; - - memset(out, 0, sizeof(fs_quota_stat_t)); - - out->qs_version = FS_QSTAT_VERSION; - out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & - (XFS_ALL_QUOTA_ACCT| - XFS_ALL_QUOTA_ENFD)); - uip = q->qi_uquotaip; - gip = q->qi_gquotaip; - pip = q->qi_pquotaip; - if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, - 0, 0, &uip) == 0) - tempuqip = true; - } - if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, - 0, 0, &gip) == 0) - tempgqip = true; - } - /* - * Q_XGETQSTAT doesn't have room for both group and project quotas. - * So, allow the project quota values to be copied out only if - * there is no group quota information available. - */ - if (!gip) { - if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino, - 0, 0, &pip) == 0) - temppqip = true; - } - } else - pip = NULL; - if (uip) { - out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; - out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; - out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; - if (tempuqip) - IRELE(uip); - } - - if (gip) { - out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; - out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; - out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; - if (tempgqip) - IRELE(gip); - } - if (pip) { - out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; - out->qs_gquota.qfs_nblks = pip->i_d.di_nblocks; - out->qs_gquota.qfs_nextents = pip->i_d.di_nextents; - if (temppqip) - IRELE(pip); - } - out->qs_incoredqs = q->qi_dquots; - out->qs_btimelimit = q->qi_btimelimit; - out->qs_itimelimit = q->qi_itimelimit; - out->qs_rtbtimelimit = q->qi_rtbtimelimit; - out->qs_bwarnlimit = q->qi_bwarnlimit; - out->qs_iwarnlimit = q->qi_iwarnlimit; - - return 0; -} - -/* - * Return quota status information, such as uquota-off, enforcements, etc. 
- * for Q_XGETQSTATV command, to support separate project quota field. - */ -int -xfs_qm_scall_getqstatv( - struct xfs_mount *mp, - struct fs_quota_statv *out) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_inode *uip = NULL; - struct xfs_inode *gip = NULL; - struct xfs_inode *pip = NULL; - bool tempuqip = false; - bool tempgqip = false; - bool temppqip = false; - - out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & - (XFS_ALL_QUOTA_ACCT| - XFS_ALL_QUOTA_ENFD)); - out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; - out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; - out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino; - - uip = q->qi_uquotaip; - gip = q->qi_gquotaip; - pip = q->qi_pquotaip; - if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, - 0, 0, &uip) == 0) - tempuqip = true; - } - if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, - 0, 0, &gip) == 0) - tempgqip = true; - } - if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino, - 0, 0, &pip) == 0) - temppqip = true; - } - if (uip) { - out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; - out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; - if (tempuqip) - IRELE(uip); - } - - if (gip) { - out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; - out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; - if (tempgqip) - IRELE(gip); - } - if (pip) { - out->qs_pquota.qfs_nblks = pip->i_d.di_nblocks; - out->qs_pquota.qfs_nextents = pip->i_d.di_nextents; - if (temppqip) - IRELE(pip); - } - out->qs_incoredqs = q->qi_dquots; - out->qs_btimelimit = q->qi_btimelimit; - out->qs_itimelimit = q->qi_itimelimit; - out->qs_rtbtimelimit = q->qi_rtbtimelimit; - out->qs_bwarnlimit = q->qi_bwarnlimit; - out->qs_iwarnlimit = q->qi_iwarnlimit; - - return 0; -} - #define XFS_QC_MASK \ (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) @@ -873,28 +719,6 @@ out_put: return error; } -STATIC uint -xfs_qm_export_flags( - uint flags) -{ - uint uflags; - - uflags = 0; - if (flags & XFS_UQUOTA_ACCT) - uflags |= FS_QUOTA_UDQ_ACCT; - if (flags & XFS_GQUOTA_ACCT) - uflags |= FS_QUOTA_GDQ_ACCT; - if (flags & XFS_PQUOTA_ACCT) - uflags |= FS_QUOTA_PDQ_ACCT; - if (flags & XFS_UQUOTA_ENFD) - uflags |= FS_QUOTA_UDQ_ENFD; - if (flags & XFS_GQUOTA_ENFD) - uflags |= FS_QUOTA_GDQ_ENFD; - if (flags & XFS_PQUOTA_ENFD) - uflags |= FS_QUOTA_PDQ_ENFD; - return uflags; -} - STATIC int xfs_dqrele_inode( diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 6923905..7795e0d 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c @@ -23,10 +23,81 @@ #include "xfs_inode.h" #include "xfs_quota.h" #include "xfs_trans.h" +#include "xfs_trace.h" +#include "xfs_icache.h" #include "xfs_qm.h" #include <linux/quota.h> +static void +xfs_qm_fill_state( + struct qc_type_state *tstate, + struct xfs_mount *mp, + struct xfs_inode *ip, + xfs_ino_t ino) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + bool tempqip = false; + + tstate->ino = ino; + if (!ip && ino == NULLFSINO) + return; + if (!ip) { + if (xfs_iget(mp, NULL, ino, 0, 0, &ip)) + return; + tempqip = true; + } + tstate->flags |= QCI_SYSFILE; + tstate->blocks = ip->i_d.di_nblocks; + tstate->nextents = ip->i_d.di_nextents; + tstate->spc_timelimit = q->qi_btimelimit; + tstate->ino_timelimit = q->qi_itimelimit; + tstate->rt_spc_timelimit = q->qi_rtbtimelimit; + tstate->spc_warnlimit = q->qi_bwarnlimit; + tstate->ino_warnlimit = q->qi_iwarnlimit; + tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit; + if 
(tempqip) + IRELE(ip); +} + +/* + * Return quota status information, such as enforcements, quota file inode + * numbers etc. + */ +static int +xfs_fs_get_quota_state( + struct super_block *sb, + struct qc_state *state) +{ + struct xfs_mount *mp = XFS_M(sb); + struct xfs_quotainfo *q = mp->m_quotainfo; + + memset(state, 0, sizeof(*state)); + if (!XFS_IS_QUOTA_RUNNING(mp)) + return 0; + state->s_incoredqs = q->qi_dquots; + if (XFS_IS_UQUOTA_RUNNING(mp)) + state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED; + if (XFS_IS_UQUOTA_ENFORCED(mp)) + state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; + if (XFS_IS_GQUOTA_RUNNING(mp)) + state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED; + if (XFS_IS_GQUOTA_ENFORCED(mp)) + state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; + if (XFS_IS_PQUOTA_RUNNING(mp)) + state->s_state[PRJQUOTA].flags |= QCI_ACCT_ENABLED; + if (XFS_IS_PQUOTA_ENFORCED(mp)) + state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED; + + xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip, + mp->m_sb.sb_uquotino); + xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp, q->qi_gquotaip, + mp->m_sb.sb_gquotino); + xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp, q->qi_pquotaip, + mp->m_sb.sb_pquotino); + return 0; +} + STATIC int xfs_quota_type(int type) { @@ -40,28 +111,40 @@ xfs_quota_type(int type) } } -STATIC int -xfs_fs_get_xstate( +#define XFS_QC_SETINFO_MASK (QC_TIMER_MASK | QC_WARNS_MASK) + +/* + * Adjust quota timers & warnings + */ +static int +xfs_fs_set_info( struct super_block *sb, - struct fs_quota_stat *fqs) + int type, + struct qc_info *info) { - struct xfs_mount *mp = XFS_M(sb); + struct xfs_mount *mp = XFS_M(sb); + struct qc_dqblk newlim; + if (sb->s_flags & MS_RDONLY) + return -EROFS; if (!XFS_IS_QUOTA_RUNNING(mp)) return -ENOSYS; - return xfs_qm_scall_getqstat(mp, fqs); -} + if (!XFS_IS_QUOTA_ON(mp)) + return -ESRCH; + if (info->i_fieldmask & ~XFS_QC_SETINFO_MASK) + return -EINVAL; + if ((info->i_fieldmask & XFS_QC_SETINFO_MASK) == 0) + return 0; -STATIC int -xfs_fs_get_xstatev( - struct super_block *sb, - struct fs_quota_statv *fqs) -{ - struct xfs_mount *mp = XFS_M(sb); + newlim.d_fieldmask = info->i_fieldmask; + newlim.d_spc_timer = info->i_spc_timelimit; + newlim.d_ino_timer = info->i_ino_timelimit; + newlim.d_rt_spc_timer = info->i_rt_spc_timelimit; + newlim.d_ino_warns = info->i_ino_warnlimit; + newlim.d_spc_warns = info->i_spc_warnlimit; + newlim.d_rt_spc_warns = info->i_rt_spc_warnlimit; - if (!XFS_IS_QUOTA_RUNNING(mp)) - return -ENOSYS; - return xfs_qm_scall_getqstatv(mp, fqs); + return xfs_qm_scall_setqlim(mp, 0, xfs_quota_type(type), &newlim); } static unsigned int @@ -178,8 +261,8 @@ xfs_fs_set_dqblk( } const struct quotactl_ops xfs_quotactl_operations = { - .get_xstatev = xfs_fs_get_xstatev, - .get_xstate = xfs_fs_get_xstate, + .get_state = xfs_fs_get_quota_state, + .set_info = xfs_fs_set_info, .quota_enable = xfs_quota_enable, .quota_disable = xfs_quota_disable, .rm_xquota = xfs_fs_rm_xquota, diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 8fcc4cc..858e1e6 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -109,8 +109,6 @@ static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */ #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ -#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ -#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed 
logging disabled */ #define MNTOPT_DISCARD "discard" /* Discard unused blocks */ #define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ @@ -361,28 +359,10 @@ xfs_parseargs( } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); mp->m_qflags &= ~XFS_GQUOTA_ENFD; - } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { - xfs_warn(mp, - "delaylog is the default now, option is deprecated."); - } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { - xfs_warn(mp, - "nodelaylog support has been removed, option is deprecated."); } else if (!strcmp(this_char, MNTOPT_DISCARD)) { mp->m_flags |= XFS_MOUNT_DISCARD; } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { mp->m_flags &= ~XFS_MOUNT_DISCARD; - } else if (!strcmp(this_char, "ihashsize")) { - xfs_warn(mp, - "ihashsize no longer used, option is deprecated."); - } else if (!strcmp(this_char, "osyncisdsync")) { - xfs_warn(mp, - "osyncisdsync has no effect, option is deprecated."); - } else if (!strcmp(this_char, "osyncisosync")) { - xfs_warn(mp, - "osyncisosync has no effect, option is deprecated."); - } else if (!strcmp(this_char, "irixsgid")) { - xfs_warn(mp, - "irixsgid is now a sysctl(2) variable, option is deprecated."); } else { xfs_warn(mp, "unknown mount option [%s].", this_char); return -EINVAL; @@ -986,6 +966,8 @@ xfs_fs_inode_init_once( atomic_set(&ip->i_pincount, 0); spin_lock_init(&ip->i_flags_lock); + mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, + "xfsino", ip->i_ino); mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, "xfsino", ip->i_ino); } @@ -1033,23 +1015,6 @@ xfs_free_fsname( kfree(mp->m_logname); } -STATIC void -xfs_fs_put_super( - struct super_block *sb) -{ - struct xfs_mount *mp = XFS_M(sb); - - xfs_filestream_unmount(mp); - xfs_unmountfs(mp); - - xfs_freesb(mp); - xfs_icsb_destroy_counters(mp); - xfs_destroy_mount_workqueues(mp); - xfs_close_devices(mp); - xfs_free_fsname(mp); - kfree(mp); -} - STATIC int xfs_fs_sync_fs( struct super_block *sb, @@ -1083,8 +1048,11 @@ xfs_fs_statfs( { struct xfs_mount *mp = XFS_M(dentry->d_sb); xfs_sb_t *sbp = &mp->m_sb; - struct xfs_inode *ip = XFS_I(dentry->d_inode); + struct xfs_inode *ip = XFS_I(d_inode(dentry)); __uint64_t fakeinos, id; + __uint64_t icount; + __uint64_t ifree; + __uint64_t fdblocks; xfs_extlen_t lsize; __int64_t ffree; @@ -1095,17 +1063,21 @@ xfs_fs_statfs( statp->f_fsid.val[0] = (u32)id; statp->f_fsid.val[1] = (u32)(id >> 32); - xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); + icount = percpu_counter_sum(&mp->m_icount); + ifree = percpu_counter_sum(&mp->m_ifree); + fdblocks = percpu_counter_sum(&mp->m_fdblocks); spin_lock(&mp->m_sb_lock); statp->f_bsize = sbp->sb_blocksize; lsize = sbp->sb_logstart ? 
sbp->sb_logblocks : 0; statp->f_blocks = sbp->sb_dblocks - lsize; - statp->f_bfree = statp->f_bavail = - sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); + spin_unlock(&mp->m_sb_lock); + + statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp); + statp->f_bavail = statp->f_bfree; + fakeinos = statp->f_bfree << sbp->sb_inopblog; - statp->f_files = - MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); + statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); if (mp->m_maxicount) statp->f_files = min_t(typeof(statp->f_files), statp->f_files, @@ -1117,10 +1089,9 @@ xfs_fs_statfs( sbp->sb_icount); /* make sure statp->f_ffree does not underflow */ - ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); + ffree = statp->f_files - (icount - ifree); statp->f_ffree = max_t(__int64_t, ffree, 0); - spin_unlock(&mp->m_sb_lock); if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == @@ -1256,6 +1227,12 @@ xfs_fs_remount( /* ro -> rw */ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { + if (mp->m_flags & XFS_MOUNT_NORECOVERY) { + xfs_warn(mp, + "ro->rw transition prohibited on norecovery mount"); + return -EINVAL; + } + mp->m_flags &= ~XFS_MOUNT_RDONLY; /* @@ -1401,6 +1378,51 @@ xfs_finish_flags( return 0; } +static int +xfs_init_percpu_counters( + struct xfs_mount *mp) +{ + int error; + + error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL); + if (error) + return -ENOMEM; + + error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL); + if (error) + goto free_icount; + + error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL); + if (error) + goto free_ifree; + + return 0; + +free_ifree: + percpu_counter_destroy(&mp->m_ifree); +free_icount: + percpu_counter_destroy(&mp->m_icount); + return -ENOMEM; +} + +void +xfs_reinit_percpu_counters( + struct xfs_mount *mp) +{ + percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount); + percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree); + percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks); +} + +static void +xfs_destroy_percpu_counters( + struct xfs_mount *mp) +{ + percpu_counter_destroy(&mp->m_icount); + percpu_counter_destroy(&mp->m_ifree); + percpu_counter_destroy(&mp->m_fdblocks); +} + STATIC int xfs_fs_fill_super( struct super_block *sb, @@ -1449,7 +1471,7 @@ xfs_fs_fill_super( if (error) goto out_close_devices; - error = xfs_icsb_init_counters(mp); + error = xfs_init_percpu_counters(mp); if (error) goto out_destroy_workqueues; @@ -1507,7 +1529,7 @@ xfs_fs_fill_super( out_free_sb: xfs_freesb(mp); out_destroy_counters: - xfs_icsb_destroy_counters(mp); + xfs_destroy_percpu_counters(mp); out_destroy_workqueues: xfs_destroy_mount_workqueues(mp); out_close_devices: @@ -1524,6 +1546,24 @@ out_destroy_workqueues: goto out_free_sb; } +STATIC void +xfs_fs_put_super( + struct super_block *sb) +{ + struct xfs_mount *mp = XFS_M(sb); + + xfs_notice(mp, "Unmounting Filesystem"); + xfs_filestream_unmount(mp); + xfs_unmountfs(mp); + + xfs_freesb(mp); + xfs_destroy_percpu_counters(mp); + xfs_destroy_mount_workqueues(mp); + xfs_close_devices(mp); + xfs_free_fsname(mp); + kfree(mp); +} + STATIC struct dentry * xfs_fs_mount( struct file_system_type *fs_type, diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h index 2b830c2..499058f 100644 --- a/fs/xfs/xfs_super.h +++ b/fs/xfs/xfs_super.h @@ -72,6 +72,8 @@ extern const struct export_operations xfs_export_operations; extern const struct xattr_handler *xfs_xattr_handlers[]; extern const struct quotactl_ops xfs_quotactl_operations; 
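Editor's note: with the counters embedded in struct xfs_mount, their lifecycle above is the standard percpu_counter one: init at mount (unwinding on failure), percpu_counter_set() after the superblock is re-read, percpu_counter_sum() wherever an exact value is needed (statfs), and destroy at unmount. A small self-contained usage sketch of that API, independent of XFS and purely illustrative:

#include <linux/percpu_counter.h>

/* Minimal lifecycle example for a single percpu counter (illustrative). */
static int counter_lifecycle_example(void)
{
	struct percpu_counter cnt;
	s64 exact;
	int error;

	error = percpu_counter_init(&cnt, 0, GFP_KERNEL);
	if (error)
		return error;

	percpu_counter_add(&cnt, 128);		/* fast, per-cpu update */
	percpu_counter_set(&cnt, 42);		/* e.g. after re-reading the sb */
	exact = percpu_counter_sum(&cnt);	/* slow, sums all cpus: 42 */

	percpu_counter_destroy(&cnt);
	return exact == 42 ? 0 : -EINVAL;
}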
+extern void xfs_reinit_percpu_counters(struct xfs_mount *mp); + #define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) #endif /* __XFS_SUPER_H__ */ diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c index 25791df..3df411e 100644 --- a/fs/xfs/xfs_symlink.c +++ b/fs/xfs/xfs_symlink.c @@ -177,7 +177,7 @@ xfs_symlink( int pathlen; struct xfs_bmap_free free_list; xfs_fsblock_t first_block; - bool unlock_dp_on_error = false; + bool unlock_dp_on_error = false; uint cancel_flags; int committed; xfs_fileoff_t first_fsb; @@ -221,7 +221,7 @@ xfs_symlink( XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp, &pdqp); if (error) - goto std_return; + return error; tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK); cancel_flags = XFS_TRANS_RELEASE_LOG_RES; @@ -241,7 +241,7 @@ xfs_symlink( } if (error) { cancel_flags = 0; - goto error_return; + goto out_trans_cancel; } xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); @@ -252,7 +252,7 @@ xfs_symlink( */ if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) { error = -EPERM; - goto error_return; + goto out_trans_cancel; } /* @@ -261,7 +261,7 @@ xfs_symlink( error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, pdqp, resblks, 1, 0); if (error) - goto error_return; + goto out_trans_cancel; /* * Check for ability to enter directory entry, if no space reserved. @@ -269,7 +269,7 @@ xfs_symlink( if (!resblks) { error = xfs_dir_canenter(tp, dp, link_name); if (error) - goto error_return; + goto out_trans_cancel; } /* * Initialize the bmap freelist prior to calling either @@ -282,15 +282,14 @@ xfs_symlink( */ error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, prid, resblks > 0, &ip, NULL); - if (error) { - if (error == -ENOSPC) - goto error_return; - goto error1; - } + if (error) + goto out_trans_cancel; /* - * An error after we've joined dp to the transaction will result in the - * transaction cancel unlocking dp so don't do it explicitly in the + * Now we join the directory inode to the transaction. We do not do it + * earlier because xfs_dir_ialloc might commit the previous transaction + * (and release all the locks). An error from here on will result in + * the transaction cancel unlocking dp so don't do it explicitly in the * error path. 
*/ xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); @@ -330,7 +329,7 @@ xfs_symlink( XFS_BMAPI_METADATA, &first_block, resblks, mval, &nmaps, &free_list); if (error) - goto error2; + goto out_bmap_cancel; if (resblks) resblks -= fs_blocks; @@ -348,7 +347,7 @@ xfs_symlink( BTOBB(byte_cnt), 0); if (!bp) { error = -ENOMEM; - goto error2; + goto out_bmap_cancel; } bp->b_ops = &xfs_symlink_buf_ops; @@ -378,7 +377,7 @@ xfs_symlink( error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, &first_block, &free_list, resblks); if (error) - goto error2; + goto out_bmap_cancel; xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); @@ -392,10 +391,13 @@ xfs_symlink( } error = xfs_bmap_finish(&tp, &free_list, &committed); - if (error) { - goto error2; - } + if (error) + goto out_bmap_cancel; + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); + if (error) + goto out_release_inode; + xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); xfs_qm_dqrele(pdqp); @@ -403,20 +405,28 @@ xfs_symlink( *ipp = ip; return 0; - error2: - IRELE(ip); - error1: +out_bmap_cancel: xfs_bmap_cancel(&free_list); cancel_flags |= XFS_TRANS_ABORT; - error_return: +out_trans_cancel: xfs_trans_cancel(tp, cancel_flags); +out_release_inode: + /* + * Wait until after the current transaction is aborted to finish the + * setup of the inode and release the inode. This prevents recursive + * transactions and deadlocks from xfs_inactive. + */ + if (ip) { + xfs_finish_inode_setup(ip); + IRELE(ip); + } + xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); xfs_qm_dqrele(pdqp); if (unlock_dp_on_error) xfs_iunlock(dp, XFS_ILOCK_EXCL); - std_return: return error; } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 51372e3..615781b 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -115,7 +115,7 @@ DECLARE_EVENT_CLASS(xfs_perag_class, __entry->refcount = refcount; __entry->caller_ip = caller_ip; ), - TP_printk("dev %d:%d agno %u refcount %d caller %pf", + TP_printk("dev %d:%d agno %u refcount %d caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->refcount, @@ -239,7 +239,7 @@ TRACE_EVENT(xfs_iext_insert, __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " - "offset %lld block %lld count %lld flag %d caller %pf", + "offset %lld block %lld count %lld flag %d caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), @@ -283,7 +283,7 @@ DECLARE_EVENT_CLASS(xfs_bmap_class, __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " - "offset %lld block %lld count %lld flag %d caller %pf", + "offset %lld block %lld count %lld flag %d caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), @@ -329,7 +329,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class, __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d bno 0x%llx nblks 0x%x hold %d pincount %d " - "lock %d flags %s caller %pf", + "lock %d flags %s caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->bno, __entry->nblks, @@ -402,7 +402,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class, __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " - "lock %d flags %s caller %pf", + "lock %d flags %s caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->bno, __entry->buffer_length, @@ -447,7 +447,7 @@ TRACE_EVENT(xfs_buf_ioerror, 
__entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " - "lock %d error %d flags %s caller %pf", + "lock %d error %d flags %s caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->bno, __entry->buffer_length, @@ -613,7 +613,7 @@ DECLARE_EVENT_CLASS(xfs_lock_class, __entry->lock_flags = lock_flags; __entry->caller_ip = caller_ip; ), - TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", + TP_printk("dev %d:%d ino 0x%llx flags %s caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), @@ -664,6 +664,7 @@ DEFINE_INODE_EVENT(xfs_alloc_file_space); DEFINE_INODE_EVENT(xfs_free_file_space); DEFINE_INODE_EVENT(xfs_zero_file_space); DEFINE_INODE_EVENT(xfs_collapse_file_space); +DEFINE_INODE_EVENT(xfs_insert_file_space); DEFINE_INODE_EVENT(xfs_readdir); #ifdef CONFIG_XFS_POSIX_ACL DEFINE_INODE_EVENT(xfs_get_acl); @@ -685,6 +686,9 @@ DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag); DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag); DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid); +DEFINE_INODE_EVENT(xfs_filemap_fault); +DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite); + DECLARE_EVENT_CLASS(xfs_iref_class, TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), TP_ARGS(ip, caller_ip), @@ -702,7 +706,7 @@ DECLARE_EVENT_CLASS(xfs_iref_class, __entry->pincount = atomic_read(&ip->i_pincount); __entry->caller_ip = caller_ip; ), - TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf", + TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->count, @@ -1217,6 +1221,11 @@ DEFINE_IOMAP_EVENT(xfs_map_blocks_found); DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc); DEFINE_IOMAP_EVENT(xfs_get_blocks_found); DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc); +DEFINE_IOMAP_EVENT(xfs_gbmap_direct); +DEFINE_IOMAP_EVENT(xfs_gbmap_direct_new); +DEFINE_IOMAP_EVENT(xfs_gbmap_direct_update); +DEFINE_IOMAP_EVENT(xfs_gbmap_direct_none); +DEFINE_IOMAP_EVENT(xfs_gbmap_direct_endio); DECLARE_EVENT_CLASS(xfs_simple_io_class, TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), @@ -1333,7 +1342,7 @@ TRACE_EVENT(xfs_bunmap, __entry->flags = flags; ), TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx" - "flags %s caller %pf", + "flags %s caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->size, @@ -1466,7 +1475,7 @@ TRACE_EVENT(xfs_agf, ), TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u " "levels b %u c %u flfirst %u fllast %u flcount %u " - "freeblks %u longest %u caller %pf", + "freeblks %u longest %u caller %ps", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __print_flags(__entry->flags, "|", XFS_AGF_FLAGS), diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index eb90cd5..220ef2c 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -173,7 +173,7 @@ xfs_trans_reserve( uint rtextents) { int error = 0; - int rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; + bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; /* Mark this thread as being in a transaction */ current_set_flags_nested(&tp->t_pflags, PF_FSTRANS); @@ -184,8 +184,7 @@ xfs_trans_reserve( * fail if the count would go below zero. 
 */
 	if (blocks > 0) {
-		error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
-					  -((int64_t)blocks), rsvd);
+		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 		if (error != 0) {
 			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 			return -ENOSPC;
@@ -236,8 +235,7 @@ xfs_trans_reserve(
 	 * fail if the count would go below zero.
 	 */
 	if (rtextents > 0) {
-		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
-					  -((int64_t)rtextents), rsvd);
+		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
 		if (error) {
 			error = -ENOSPC;
 			goto undo_log;
@@ -268,8 +266,7 @@ undo_log:
 
 undo_blocks:
 	if (blocks > 0) {
-		xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
-					 (int64_t)blocks, rsvd);
+		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
 		tp->t_blk_res = 0;
 	}
 
@@ -488,6 +485,54 @@ xfs_trans_apply_sb_deltas(
 				  sizeof(sbp->sb_frextents) - 1);
 }
 
+STATIC int
+xfs_sb_mod8(
+	uint8_t			*field,
+	int8_t			delta)
+{
+	int8_t			counter = *field;
+
+	counter += delta;
+	if (counter < 0) {
+		ASSERT(0);
+		return -EINVAL;
+	}
+	*field = counter;
+	return 0;
+}
+
+STATIC int
+xfs_sb_mod32(
+	uint32_t		*field,
+	int32_t			delta)
+{
+	int32_t			counter = *field;
+
+	counter += delta;
+	if (counter < 0) {
+		ASSERT(0);
+		return -EINVAL;
+	}
+	*field = counter;
+	return 0;
+}
+
+STATIC int
+xfs_sb_mod64(
+	uint64_t		*field,
+	int64_t			delta)
+{
+	int64_t			counter = *field;
+
+	counter += delta;
+	if (counter < 0) {
+		ASSERT(0);
+		return -EINVAL;
+	}
+	*field = counter;
+	return 0;
+}
+
 /*
  * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
  * and apply superblock counter changes to the in-core superblock.  The
@@ -495,13 +540,6 @@ xfs_trans_apply_sb_deltas(
  * applied to the in-core superblock.  The idea is that that has already been
  * done.
  *
- * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
- * However, we have to ensure that we only modify each superblock field only
- * once because the application of the delta values may not be atomic. That can
- * lead to ENOSPC races occurring if we have two separate modifications of the
- * free space counter to put back the entire reservation and then take away
- * what we used.
- *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock.
In this case, * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we @@ -509,21 +547,15 @@ xfs_trans_apply_sb_deltas( */ void xfs_trans_unreserve_and_mod_sb( - xfs_trans_t *tp) + struct xfs_trans *tp) { - xfs_mod_sb_t msb[9]; /* If you add cases, add entries */ - xfs_mod_sb_t *msbp; - xfs_mount_t *mp = tp->t_mountp; - /* REFERENCED */ - int error; - int rsvd; - int64_t blkdelta = 0; - int64_t rtxdelta = 0; - int64_t idelta = 0; - int64_t ifreedelta = 0; - - msbp = msb; - rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; + struct xfs_mount *mp = tp->t_mountp; + bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; + int64_t blkdelta = 0; + int64_t rtxdelta = 0; + int64_t idelta = 0; + int64_t ifreedelta = 0; + int error; /* calculate deltas */ if (tp->t_blk_res > 0) @@ -547,97 +579,115 @@ xfs_trans_unreserve_and_mod_sb( /* apply the per-cpu counters */ if (blkdelta) { - error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, - blkdelta, rsvd); + error = xfs_mod_fdblocks(mp, blkdelta, rsvd); if (error) goto out; } if (idelta) { - error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, - idelta, rsvd); + error = xfs_mod_icount(mp, idelta); if (error) goto out_undo_fdblocks; } if (ifreedelta) { - error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, - ifreedelta, rsvd); + error = xfs_mod_ifree(mp, ifreedelta); if (error) goto out_undo_icount; } + if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY)) + return; + /* apply remaining deltas */ - if (rtxdelta != 0) { - msbp->msb_field = XFS_SBS_FREXTENTS; - msbp->msb_delta = rtxdelta; - msbp++; + spin_lock(&mp->m_sb_lock); + if (rtxdelta) { + error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta); + if (error) + goto out_undo_ifree; } - if (tp->t_flags & XFS_TRANS_SB_DIRTY) { - if (tp->t_dblocks_delta != 0) { - msbp->msb_field = XFS_SBS_DBLOCKS; - msbp->msb_delta = tp->t_dblocks_delta; - msbp++; - } - if (tp->t_agcount_delta != 0) { - msbp->msb_field = XFS_SBS_AGCOUNT; - msbp->msb_delta = tp->t_agcount_delta; - msbp++; - } - if (tp->t_imaxpct_delta != 0) { - msbp->msb_field = XFS_SBS_IMAX_PCT; - msbp->msb_delta = tp->t_imaxpct_delta; - msbp++; - } - if (tp->t_rextsize_delta != 0) { - msbp->msb_field = XFS_SBS_REXTSIZE; - msbp->msb_delta = tp->t_rextsize_delta; - msbp++; - } - if (tp->t_rbmblocks_delta != 0) { - msbp->msb_field = XFS_SBS_RBMBLOCKS; - msbp->msb_delta = tp->t_rbmblocks_delta; - msbp++; - } - if (tp->t_rblocks_delta != 0) { - msbp->msb_field = XFS_SBS_RBLOCKS; - msbp->msb_delta = tp->t_rblocks_delta; - msbp++; - } - if (tp->t_rextents_delta != 0) { - msbp->msb_field = XFS_SBS_REXTENTS; - msbp->msb_delta = tp->t_rextents_delta; - msbp++; - } - if (tp->t_rextslog_delta != 0) { - msbp->msb_field = XFS_SBS_REXTSLOG; - msbp->msb_delta = tp->t_rextslog_delta; - msbp++; - } + if (tp->t_dblocks_delta != 0) { + error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta); + if (error) + goto out_undo_frextents; } - - /* - * If we need to change anything, do it. 
- */
-	if (msbp > msb) {
-		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
-			(uint)(msbp - msb), rsvd);
+	if (tp->t_agcount_delta != 0) {
+		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
 		if (error)
-			goto out_undo_ifreecount;
+			goto out_undo_dblocks;
 	}
-
+	if (tp->t_imaxpct_delta != 0) {
+		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
+		if (error)
+			goto out_undo_agcount;
+	}
+	if (tp->t_rextsize_delta != 0) {
+		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
+				     tp->t_rextsize_delta);
+		if (error)
+			goto out_undo_imaxpct;
+	}
+	if (tp->t_rbmblocks_delta != 0) {
+		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
+				     tp->t_rbmblocks_delta);
+		if (error)
+			goto out_undo_rextsize;
+	}
+	if (tp->t_rblocks_delta != 0) {
+		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
+		if (error)
+			goto out_undo_rbmblocks;
+	}
+	if (tp->t_rextents_delta != 0) {
+		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
+				     tp->t_rextents_delta);
+		if (error)
+			goto out_undo_rblocks;
+	}
+	if (tp->t_rextslog_delta != 0) {
+		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
+				    tp->t_rextslog_delta);
+		if (error)
+			goto out_undo_rextents;
+	}
+	spin_unlock(&mp->m_sb_lock);
 	return;
 
-out_undo_ifreecount:
+out_undo_rextents:
+	if (tp->t_rextents_delta)
+		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
+out_undo_rblocks:
+	if (tp->t_rblocks_delta)
+		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
+out_undo_rbmblocks:
+	if (tp->t_rbmblocks_delta)
+		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
+out_undo_rextsize:
+	if (tp->t_rextsize_delta)
+		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
+out_undo_imaxpct:
+	if (tp->t_imaxpct_delta)
+		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
+out_undo_agcount:
+	if (tp->t_agcount_delta)
+		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
+out_undo_dblocks:
+	if (tp->t_dblocks_delta)
+		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
+out_undo_frextents:
+	if (rtxdelta)
+		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
+out_undo_ifree:
+	spin_unlock(&mp->m_sb_lock);
 	if (ifreedelta)
-		xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
+		xfs_mod_ifree(mp, -ifreedelta);
 out_undo_icount:
 	if (idelta)
-		xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
+		xfs_mod_icount(mp, -idelta);
 out_undo_fdblocks:
 	if (blkdelta)
-		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
+		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
 out:
 	ASSERT(error == 0);
 	return;
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c index 69f6e47..c0368151 100644 --- a/fs/xfs/xfs_xattr.c +++ b/fs/xfs/xfs_xattr.c
@@ -35,7 +35,7 @@ static int
 xfs_xattr_get(struct dentry *dentry, const char *name,
 		void *value, size_t size, int xflags)
 {
-	struct xfs_inode *ip = XFS_I(dentry->d_inode);
+	struct xfs_inode *ip = XFS_I(d_inode(dentry));
 	int error, asize = size;
 
 	if (strcmp(name, "") == 0)
@@ -57,7 +57,7 @@ static int
 xfs_xattr_set(struct dentry *dentry, const char *name, const void *value,
 		size_t size, int flags, int xflags)
 {
-	struct xfs_inode *ip = XFS_I(dentry->d_inode);
+	struct xfs_inode *ip = XFS_I(d_inode(dentry));
 
 	if (strcmp(name, "") == 0)
 		return -EINVAL;
@@ -197,7 +197,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
 {
 	struct xfs_attr_list_context context;
 	struct attrlist_cursor_kern cursor = { 0 };
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = d_inode(dentry);
 	int error;
 
 	/* |
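(Not part of the patches above.) For readers following the xfs_trans_unreserve_and_mod_sb() rework, the essence of the new error handling is: apply each superblock delta in turn while holding m_sb_lock, and on failure fall through a chain of labels that back out only the deltas already applied. Below is a minimal user-space sketch of that pattern; all names (sb_counters, sb_mod64, sb_apply_deltas) are illustrative stand-ins rather than kernel APIs, and a pthread mutex stands in for the spinlock.

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

/* Illustrative stand-ins for a few superblock counters. */
struct sb_counters {
    int64_t frextents;
    int64_t dblocks;
    int32_t agcount;
};

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of the xfs_sb_mod64() idea: apply a signed delta, refuse underflow. */
static int sb_mod64(int64_t *field, int64_t delta)
{
    int64_t counter = *field + delta;

    if (counter < 0)
        return -1;          /* would go negative: reject, leave *field alone */
    *field = counter;
    return 0;
}

static int sb_mod32(int32_t *field, int32_t delta)
{
    int32_t counter = *field + delta;

    if (counter < 0)
        return -1;
    *field = counter;
    return 0;
}

/*
 * Apply all deltas or none: on failure, unwind through the labels in
 * reverse order so only the already-applied deltas are backed out.
 */
static int sb_apply_deltas(struct sb_counters *sb, int64_t d_frextents,
                           int64_t d_dblocks, int32_t d_agcount)
{
    int error;

    pthread_mutex_lock(&sb_lock);
    error = sb_mod64(&sb->frextents, d_frextents);
    if (error)
        goto out_unlock;
    error = sb_mod64(&sb->dblocks, d_dblocks);
    if (error)
        goto out_undo_frextents;
    error = sb_mod32(&sb->agcount, d_agcount);
    if (error)
        goto out_undo_dblocks;
    pthread_mutex_unlock(&sb_lock);
    return 0;

out_undo_dblocks:
    sb_mod64(&sb->dblocks, -d_dblocks);
out_undo_frextents:
    sb_mod64(&sb->frextents, -d_frextents);
out_unlock:
    pthread_mutex_unlock(&sb_lock);
    return error;
}

int main(void)
{
    struct sb_counters sb = { .frextents = 10, .dblocks = 100, .agcount = 4 };

    /* The agcount delta underflows, so every earlier delta is undone. */
    if (sb_apply_deltas(&sb, -5, -50, -8))
        printf("rejected: frextents=%lld dblocks=%lld agcount=%d\n",
               (long long)sb.frextents, (long long)sb.dblocks, sb.agcount);
    return 0;
}

The point of the width-specific helpers is that each one rejects a delta that would drive its counter negative without touching the field, so a non-zero return guarantees nothing past that field was modified and the unwind chain only has to reverse the earlier steps.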