Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_file.c | 2
-rw-r--r--  fs/Kconfig | 2
-rw-r--r--  fs/afs/rxrpc.c | 14
-rw-r--r--  fs/aio.c | 7
-rw-r--r--  fs/btrfs/Kconfig | 1
-rw-r--r--  fs/btrfs/ctree.h | 1
-rw-r--r--  fs/btrfs/extent-tree.c | 2
-rw-r--r--  fs/btrfs/extent_io.c | 4
-rw-r--r--  fs/btrfs/file.c | 1
-rw-r--r--  fs/btrfs/scrub.c | 4
-rw-r--r--  fs/btrfs/super.c | 14
-rw-r--r--  fs/btrfs/transaction.c | 2
-rw-r--r--  fs/btrfs/tree-log.c | 1
-rw-r--r--  fs/ceph/addr.c | 1
-rw-r--r--  fs/ceph/locks.c | 64
-rw-r--r--  fs/ceph/mds_client.c | 4
-rw-r--r--  fs/cifs/cifs_debug.c | 6
-rw-r--r--  fs/cifs/file.c | 41
-rw-r--r--  fs/cifs/ioctl.c | 21
-rw-r--r--  fs/cifs/smbencrypt.c | 2
-rw-r--r--  fs/dlm/netlink.c | 7
-rw-r--r--  fs/efivarfs/Kconfig | 1
-rw-r--r--  fs/efivarfs/super.c | 2
-rw-r--r--  fs/ext3/super.c | 2
-rw-r--r--  fs/ext4/file.c | 1
-rw-r--r--  fs/ext4/super.c | 42
-rw-r--r--  fs/f2fs/file.c | 1
-rw-r--r--  fs/fuse/file.c | 1
-rw-r--r--  fs/gfs2/acl.c | 2
-rw-r--r--  fs/gfs2/dir.c | 3
-rw-r--r--  fs/gfs2/file.c | 1
-rw-r--r--  fs/gfs2/glock.c | 13
-rw-r--r--  fs/gfs2/inode.c | 3
-rw-r--r--  fs/gfs2/quota.c | 49
-rw-r--r--  fs/gfs2/recovery.c | 2
-rw-r--r--  fs/gfs2/sys.c | 2
-rw-r--r--  fs/inode.c | 4
-rw-r--r--  fs/ioctl.c | 5
-rw-r--r--  fs/isofs/util.c | 18
-rw-r--r--  fs/jfs/endian24.h | 49
-rw-r--r--  fs/jfs/jfs_dtree.c | 4
-rw-r--r--  fs/jfs/jfs_types.h | 55
-rw-r--r--  fs/jfs/jfs_xtree.h | 25
-rw-r--r--  fs/jfs/super.c | 3
-rw-r--r--  fs/lockd/mon.c | 13
-rw-r--r--  fs/lockd/svclock.c | 4
-rw-r--r--  fs/lockd/svcsubs.c | 26
-rw-r--r--  fs/lockd/xdr.c | 8
-rw-r--r--  fs/locks.c | 593
-rw-r--r--  fs/nfs/Kconfig | 5
-rw-r--r--  fs/nfs/Makefile | 3
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 2
-rw-r--r--  fs/nfs/callback.c | 8
-rw-r--r--  fs/nfs/delegation.c | 43
-rw-r--r--  fs/nfs/direct.c | 118
-rw-r--r--  fs/nfs/file.c | 1
-rw-r--r--  fs/nfs/filelayout/filelayout.c | 315
-rw-r--r--  fs/nfs/filelayout/filelayout.h | 40
-rw-r--r--  fs/nfs/filelayout/filelayoutdev.c | 469
-rw-r--r--  fs/nfs/flexfilelayout/Makefile | 5
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 1574
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.h | 155
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c | 552
-rw-r--r--  fs/nfs/idmap.c | 3
-rw-r--r--  fs/nfs/inode.c | 12
-rw-r--r--  fs/nfs/internal.h | 55
-rw-r--r--  fs/nfs/nfs2xdr.c | 10
-rw-r--r--  fs/nfs/nfs3_fs.h | 2
-rw-r--r--  fs/nfs/nfs3client.c | 41
-rw-r--r--  fs/nfs/nfs3proc.c | 9
-rw-r--r--  fs/nfs/nfs3super.c | 2
-rw-r--r--  fs/nfs/nfs3xdr.c | 3
-rw-r--r--  fs/nfs/nfs4_fs.h | 9
-rw-r--r--  fs/nfs/nfs4client.c | 9
-rw-r--r--  fs/nfs/nfs4proc.c | 315
-rw-r--r--  fs/nfs/nfs4state.c | 101
-rw-r--r--  fs/nfs/nfs4super.c | 3
-rw-r--r--  fs/nfs/nfs4xdr.c | 113
-rw-r--r--  fs/nfs/nfsroot.c | 4
-rw-r--r--  fs/nfs/objlayout/objio_osd.c | 5
-rw-r--r--  fs/nfs/pagelist.c | 300
-rw-r--r--  fs/nfs/pnfs.c | 471
-rw-r--r--  fs/nfs/pnfs.h | 135
-rw-r--r--  fs/nfs/pnfs_nfs.c | 840
-rw-r--r--  fs/nfs/read.c | 33
-rw-r--r--  fs/nfs/super.c | 9
-rw-r--r--  fs/nfs/write.c | 93
-rw-r--r--  fs/nfsd/Kconfig | 10
-rw-r--r--  fs/nfsd/Makefile | 8
-rw-r--r--  fs/nfsd/blocklayout.c | 189
-rw-r--r--  fs/nfsd/blocklayoutxdr.c | 157
-rw-r--r--  fs/nfsd/blocklayoutxdr.h | 62
-rw-r--r--  fs/nfsd/export.c | 8
-rw-r--r--  fs/nfsd/export.h | 2
-rw-r--r--  fs/nfsd/nfs4callback.c | 99
-rw-r--r--  fs/nfsd/nfs4layouts.c | 721
-rw-r--r--  fs/nfsd/nfs4proc.c | 310
-rw-r--r--  fs/nfsd/nfs4state.c | 97
-rw-r--r--  fs/nfsd/nfs4xdr.c | 362
-rw-r--r--  fs/nfsd/nfsctl.c | 9
-rw-r--r--  fs/nfsd/nfsd.h | 16
-rw-r--r--  fs/nfsd/nfsfh.h | 18
-rw-r--r--  fs/nfsd/nfssvc.c | 1
-rw-r--r--  fs/nfsd/pnfs.h | 81
-rw-r--r--  fs/nfsd/state.h | 43
-rw-r--r--  fs/nfsd/trace.c | 5
-rw-r--r--  fs/nfsd/trace.h | 54
-rw-r--r--  fs/nfsd/xdr4.h | 59
-rw-r--r--  fs/nfsd/xdr4cb.h | 7
-rw-r--r--  fs/nilfs2/file.c | 1
-rw-r--r--  fs/nilfs2/nilfs.h | 2
-rw-r--r--  fs/nilfs2/segment.c | 44
-rw-r--r--  fs/nilfs2/segment.h | 5
-rw-r--r--  fs/notify/Kconfig | 1
-rw-r--r--  fs/notify/fanotify/fanotify.c | 2
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 35
-rw-r--r--  fs/ocfs2/acl.c | 14
-rw-r--r--  fs/ocfs2/alloc.c | 18
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 3
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 12
-rw-r--r--  fs/ocfs2/dir.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 6
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 4
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 14
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.h | 1
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 7
-rw-r--r--  fs/ocfs2/dlmglue.c | 3
-rw-r--r--  fs/ocfs2/file.c | 2
-rw-r--r--  fs/ocfs2/journal.c | 1
-rw-r--r--  fs/ocfs2/mmap.c | 1
-rw-r--r--  fs/ocfs2/ocfs2.h | 2
-rw-r--r--  fs/ocfs2/quota.h | 1
-rw-r--r--  fs/ocfs2/quota_local.c | 20
-rw-r--r--  fs/ocfs2/refcounttree.c | 2
-rw-r--r--  fs/ocfs2/reservations.c | 2
-rw-r--r--  fs/ocfs2/super.c | 49
-rw-r--r--  fs/ocfs2/xattr.c | 10
-rw-r--r--  fs/proc/page.c | 16
-rw-r--r--  fs/proc/task_mmu.c | 234
-rw-r--r--  fs/pstore/Kconfig | 10
-rw-r--r--  fs/pstore/Makefile | 2
-rw-r--r--  fs/pstore/inode.c | 26
-rw-r--r--  fs/pstore/internal.h | 6
-rw-r--r--  fs/pstore/platform.c | 5
-rw-r--r--  fs/pstore/pmsg.c | 114
-rw-r--r--  fs/pstore/ram.c | 53
-rw-r--r--  fs/quota/Kconfig | 1
-rw-r--r--  fs/quota/dquot.c | 186
-rw-r--r--  fs/quota/quota.c | 214
-rw-r--r--  fs/quota/quota_v1.c | 4
-rw-r--r--  fs/quota/quota_v2.c | 16
-rw-r--r--  fs/read_write.c | 2
-rw-r--r--  fs/ubifs/file.c | 1
-rw-r--r--  fs/udf/Kconfig | 10
-rw-r--r--  fs/udf/file.c | 2
-rw-r--r--  fs/udf/inode.c | 32
-rw-r--r--  fs/udf/super.c | 5
-rw-r--r--  fs/xfs/kmem.c | 10
-rw-r--r--  fs/xfs/kmem.h | 5
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 20
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 33
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 24
-rw-r--r--  fs/xfs/libxfs/xfs_fs.h (renamed from fs/xfs/xfs_fs.h) | 0
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 320
-rw-r--r--  fs/xfs/libxfs/xfs_sb.h | 11
-rw-r--r--  fs/xfs/libxfs/xfs_shared.h | 33
-rw-r--r--  fs/xfs/libxfs/xfs_symlink_remote.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_trans_resv.c | 14
-rw-r--r--  fs/xfs/libxfs/xfs_trans_resv.h | 1
-rw-r--r--  fs/xfs/libxfs/xfs_types.h (renamed from fs/xfs/xfs_types.h) | 0
-rw-r--r--  fs/xfs/xfs_aops.c | 149
-rw-r--r--  fs/xfs/xfs_aops.h | 3
-rw-r--r--  fs/xfs/xfs_bmap_util.h | 37
-rw-r--r--  fs/xfs/xfs_buf_item.c | 6
-rw-r--r--  fs/xfs/xfs_dquot.h | 2
-rw-r--r--  fs/xfs/xfs_file.c | 65
-rw-r--r--  fs/xfs/xfs_fsops.c | 34
-rw-r--r--  fs/xfs/xfs_inode.c | 136
-rw-r--r--  fs/xfs/xfs_inode.h | 11
-rw-r--r--  fs/xfs/xfs_ioctl.c | 501
-rw-r--r--  fs/xfs/xfs_ioctl32.c | 2
-rw-r--r--  fs/xfs/xfs_iomap.c | 2
-rw-r--r--  fs/xfs/xfs_iomap.h | 2
-rw-r--r--  fs/xfs/xfs_iops.c | 21
-rw-r--r--  fs/xfs/xfs_log.c | 28
-rw-r--r--  fs/xfs/xfs_mount.c | 107
-rw-r--r--  fs/xfs/xfs_mount.h | 5
-rw-r--r--  fs/xfs/xfs_qm.c | 43
-rw-r--r--  fs/xfs/xfs_qm.h | 5
-rw-r--r--  fs/xfs/xfs_qm_syscalls.c | 244
-rw-r--r--  fs/xfs/xfs_quotaops.c | 67
-rw-r--r--  fs/xfs/xfs_super.c | 20
-rw-r--r--  fs/xfs/xfs_sysctl.c | 18
-rw-r--r--  fs/xfs/xfs_trans.c | 1
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 5
196 files changed, 9164 insertions(+), 3354 deletions(-)
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 5594505..b401337 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -831,7 +831,6 @@ static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = v9fs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
@@ -839,7 +838,6 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = v9fs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
diff --git a/fs/Kconfig b/fs/Kconfig
index 664991a..a6bb530 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -165,6 +165,7 @@ config HUGETLB_PAGE
def_bool HUGETLBFS
source "fs/configfs/Kconfig"
+source "fs/efivarfs/Kconfig"
endmenu
@@ -209,7 +210,6 @@ source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
source "fs/f2fs/Kconfig"
-source "fs/efivarfs/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 06e14bf..dbc732e 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
_debug("- range %u-%u%s",
offset, to, msg->msg_flags ? " [more]" : "");
- iov_iter_init(&msg->msg_iter, WRITE,
- (struct iovec *) iov, 1, to - offset);
+ iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
+ iov, 1, to - offset);
/* have to change the state *before* sending the last
* packet as RxRPC might give us the reply before it
@@ -384,7 +384,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1,
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
call->request_size);
msg.msg_control = NULL;
msg.msg_controllen = 0;
@@ -770,7 +770,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
void afs_send_empty_reply(struct afs_call *call)
{
struct msghdr msg;
- struct iovec iov[1];
+ struct kvec iov[1];
_enter("");
@@ -778,7 +778,7 @@ void afs_send_empty_reply(struct afs_call *call)
iov[0].iov_len = 0;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? */
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0); /* WTF? */
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -805,7 +805,7 @@ void afs_send_empty_reply(struct afs_call *call)
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
struct msghdr msg;
- struct iovec iov[1];
+ struct kvec iov[1];
int n;
_enter("");
@@ -814,7 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
iov[0].iov_len = len;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len);
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
diff --git a/fs/aio.c b/fs/aio.c
index 3bf8b1d..118a2e0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1127,6 +1127,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
long ret = 0;
int copy_ret;
+ /*
+ * The mutex can block and wake us up and that will cause
+ * wait_event_interruptible_hrtimeout() to schedule without sleeping
+ * and repeat. This should be rare enough that it doesn't cause
+ * performance issues. See the comment in read_events() for more detail.
+ */
+ sched_annotate_sleep();
mutex_lock(&ctx->ring_lock);
/* Access to ->ring_pages here is protected by ctx->ring_lock. */
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a66768e..80e9c18 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -8,6 +8,7 @@ config BTRFS_FS
select LZO_DECOMPRESS
select RAID6_PQ
select XOR_BLOCKS
+ select SRCU
help
Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7e60741..0b18070 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1171,6 +1171,7 @@ struct btrfs_space_info {
struct percpu_counter total_bytes_pinned;
struct list_head list;
+ /* Protected by the spinlock 'lock'. */
struct list_head ro_bgs;
struct rw_semaphore groups_sem;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1511658..a684086 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9422,7 +9422,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* are still on the list after taking the semaphore
*/
list_del_init(&block_group->list);
- list_del_init(&block_group->ro_list);
if (list_empty(&block_group->space_info->block_groups[index])) {
kobj = block_group->space_info->block_group_kobjs[index];
block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9464,6 +9463,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
btrfs_remove_free_space_cache(block_group);
spin_lock(&block_group->space_info->lock);
+ list_del_init(&block_group->ro_list);
block_group->space_info->total_bytes -= block_group->key.offset;
block_group->space_info->bytes_readonly -= block_group->key.offset;
block_group->space_info->disk_total -= block_group->key.offset * factor;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4ebabd2..c73df6a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1407,8 +1407,8 @@ int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
while (index <= end_index) {
page = find_get_page(inode->i_mapping, index);
BUG_ON(!page); /* Pages should be in the extent_io_tree */
- account_page_redirty(page);
__set_page_dirty_nobuffers(page);
+ account_page_redirty(page);
page_cache_release(page);
index++;
}
@@ -2190,7 +2190,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
next = next_state(state);
- failrec = (struct io_failure_record *)state->private;
+ failrec = (struct io_failure_record *)(unsigned long)state->private;
free_extent_state(state);
kfree(failrec);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 835c04a..b78bbba 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2081,7 +2081,6 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = btrfs_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9e1569f..e427cb7e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3053,7 +3053,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
ppath = btrfs_alloc_path();
if (!ppath) {
- btrfs_free_path(ppath);
+ btrfs_free_path(path);
return -ENOMEM;
}
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
path->search_commit_root = 1;
path->skip_locking = 1;
+ ppath->search_commit_root = 1;
+ ppath->skip_locking = 1;
/*
* trigger the readahead for extent tree csum tree and wait for
* completion. During readahead, the scrub is officially paused
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 60f7cbe..6f49b28 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1000,10 +1000,20 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
*/
if (fs_info->pending_changes == 0)
return 0;
+ /*
+ * A non-blocking test if the fs is frozen. We must not
+ * start a new transaction here otherwise a deadlock
+ * happens. The pending operations are delayed to the
+ * next commit after thawing.
+ */
+ if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+ else
+ return 0;
trans = btrfs_start_transaction(root, 0);
- } else {
- return PTR_ERR(trans);
}
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
}
return btrfs_commit_transaction(trans, root);
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a605d4e..e88b59d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2118,7 +2118,7 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
unsigned long prev;
unsigned long bit;
- prev = cmpxchg(&fs_info->pending_changes, 0, 0);
+ prev = xchg(&fs_info->pending_changes, 0);
if (!prev)
return;
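
The cmpxchg -> xchg change above is the whole fix: cmpxchg(&x, 0, 0) stores only when the value is already 0, so with pending bits set it degrades to a plain read and never consumes them, while xchg() fetches and clears in one atomic step. A minimal userspace sketch of the difference using C11 atomics (stand-ins for the kernel primitives, not the kernel code):

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	_Atomic unsigned long pending = 0x5;	/* two pending-change bits set */

	/* Old behaviour: cmpxchg(&pending, 0, 0) stores only if pending == 0,
	 * so here it is a pure read and the bits stay set forever. */
	unsigned long expected = 0;
	atomic_compare_exchange_strong(&pending, &expected, 0);
	assert(atomic_load(&pending) == 0x5);	/* nothing was consumed */

	/* New behaviour: xchg(&pending, 0) fetches the bits and clears them
	 * in one atomic step, so each pending change is applied exactly once. */
	unsigned long prev = atomic_exchange(&pending, 0);
	assert(prev == 0x5 && atomic_load(&pending) == 0);
	return 0;
}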
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9a02da1..1a9585d 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
+ blk_finish_plug(&plug);
mutex_unlock(&log_root_tree->log_mutex);
ret = root_log_ctx.log_ret;
goto out;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c81c0e00..24be059 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1569,7 +1569,6 @@ out:
static struct vm_operations_struct ceph_vmops = {
.fault = ceph_filemap_fault,
.page_mkwrite = ceph_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index c35c5c6..06ea5cd 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -239,23 +239,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
return err;
}
-/**
- * Must be called with lock_flocks() already held. Fills in the passed
- * counter variables, so you can prepare pagelist metadata before calling
- * ceph_encode_locks.
+/*
+ * Fills in the passed counter variables, so you can prepare pagelist metadata
+ * before calling ceph_encode_locks.
*/
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
- struct file_lock *lock;
+ struct file_lock_context *ctx;
*fcntl_count = 0;
*flock_count = 0;
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_POSIX)
- ++(*fcntl_count);
- else if (lock->fl_flags & FL_FLOCK)
- ++(*flock_count);
+ ctx = inode->i_flctx;
+ if (ctx) {
+ *fcntl_count = ctx->flc_posix_cnt;
+ *flock_count = ctx->flc_flock_cnt;
}
dout("counted %d flock locks and %d fcntl locks",
*flock_count, *fcntl_count);
@@ -271,6 +269,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
int num_fcntl_locks, int num_flock_locks)
{
struct file_lock *lock;
+ struct file_lock_context *ctx = inode->i_flctx;
int err = 0;
int seen_fcntl = 0;
int seen_flock = 0;
@@ -279,33 +278,34 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
dout("encoding %d flock and %d fcntl locks", num_flock_locks,
num_fcntl_locks);
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_POSIX) {
- ++seen_fcntl;
- if (seen_fcntl > num_fcntl_locks) {
- err = -ENOSPC;
- goto fail;
- }
- err = lock_to_ceph_filelock(lock, &flocks[l]);
- if (err)
- goto fail;
- ++l;
+ if (!ctx)
+ return 0;
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
+ ++seen_fcntl;
+ if (seen_fcntl > num_fcntl_locks) {
+ err = -ENOSPC;
+ goto fail;
}
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
+ if (err)
+ goto fail;
+ ++l;
}
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_FLOCK) {
- ++seen_flock;
- if (seen_flock > num_flock_locks) {
- err = -ENOSPC;
- goto fail;
- }
- err = lock_to_ceph_filelock(lock, &flocks[l]);
- if (err)
- goto fail;
- ++l;
+ list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ ++seen_flock;
+ if (seen_flock > num_flock_locks) {
+ err = -ENOSPC;
+ goto fail;
}
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
+ if (err)
+ goto fail;
+ ++l;
}
fail:
+ spin_unlock(&ctx->flc_lock);
return err;
}
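
This conversion is part of the merge's core VFS change: per-inode locks move off the old i_flock chain (one singly linked list mixing POSIX locks, flock locks, and leases) onto a file_lock_context with separate per-type lists and counters, so counting becomes a field read instead of a traversal. A toy userspace model of what ceph_count_locks() now costs (illustrative struct only; the real context also carries flc_lock, flc_lease, and the list heads):

#include <assert.h>
#include <stddef.h>

struct flctx_sketch {
	int flc_posix_cnt;	/* fcntl/POSIX locks */
	int flc_flock_cnt;	/* flock locks */
};

/* Mirrors the new ceph_count_locks(): a NULL context means no lock was
 * ever taken on the inode. */
static void count_locks(const struct flctx_sketch *ctx,
			int *fcntl_count, int *flock_count)
{
	*fcntl_count = ctx ? ctx->flc_posix_cnt : 0;
	*flock_count = ctx ? ctx->flc_flock_cnt : 0;
}

int main(void)
{
	struct flctx_sketch ctx = { .flc_posix_cnt = 2, .flc_flock_cnt = 1 };
	int nf, nl;

	count_locks(&ctx, &nf, &nl);
	assert(nf == 2 && nl == 1);
	count_locks(NULL, &nf, &nl);
	assert(nf == 0 && nl == 0);
	return 0;
}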
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d2171f4..5f62fb7 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2700,20 +2700,16 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_filelock *flocks;
encode_again:
- spin_lock(&inode->i_lock);
ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
- spin_unlock(&inode->i_lock);
flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
sizeof(struct ceph_filelock), GFP_NOFS);
if (!flocks) {
err = -ENOMEM;
goto out_free;
}
- spin_lock(&inode->i_lock);
err = ceph_encode_locks_to_buffer(inode, flocks,
num_fcntl_locks,
num_flock_locks);
- spin_unlock(&inode->i_lock);
if (err) {
kfree(flocks);
if (err == -ENOSPC)
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 9c56ef7..7febcf2 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
*flags = CIFSSEC_MUST_NTLMV2;
else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
*flags = CIFSSEC_MUST_NTLM;
- else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
+ else if (CIFSSEC_MUST_LANMAN &&
+ (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
*flags = CIFSSEC_MUST_LANMAN;
- else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
+ else if (CIFSSEC_MUST_PLNTXT &&
+ (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
*flags = CIFSSEC_MUST_PLNTXT;
*flags |= signflags;
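
The added "CIFSSEC_MUST_LANMAN &&" and "CIFSSEC_MUST_PLNTXT &&" guards work around a classic pitfall: when weak-auth support is compiled out, those masks are defined to 0, and (*flags & 0) == 0 is true for every flags value, so the old tests matched unconditionally. A small sketch of the pitfall (FEATURE_MASK is a hypothetical name):

#include <assert.h>

#define FEATURE_MASK 0	/* feature compiled out: its mask collapses to 0 */

int main(void)
{
	unsigned int flags = 0x1234;

	/* (flags & 0) == 0 holds for any flags, so this test is always true */
	assert((flags & FEATURE_MASK) == FEATURE_MASK);

	/* guarding with the mask itself makes the branch unreachable instead */
	assert(!(FEATURE_MASK && (flags & FEATURE_MASK) == FEATURE_MASK));
	return 0;
}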
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 96b7e9b..8fe1f7a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
struct cifsLockInfo *li, *tmp;
struct cifs_fid fid;
struct cifs_pending_open open;
+ bool oplock_break_cancelled;
spin_lock(&cifs_file_list_lock);
if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
}
spin_unlock(&cifs_file_list_lock);
- cancel_work_sync(&cifs_file->oplock_break);
+ oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
_free_xid(xid);
}
+ if (oplock_break_cancelled)
+ cifs_done_oplock_break(cifsi);
+
cifs_del_pending_open(&open);
/*
@@ -1109,11 +1113,6 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
return rc;
}
-/* copied from fs/locks.c with a name change */
-#define cifs_for_each_lock(inode, lockp) \
- for (lockp = &inode->i_flock; *lockp != NULL; \
- lockp = &(*lockp)->fl_next)
-
struct lock_to_push {
struct list_head llist;
__u64 offset;
@@ -1128,8 +1127,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
struct inode *inode = cfile->dentry->d_inode;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct file_lock *flock, **before;
- unsigned int count = 0, i = 0;
+ struct file_lock *flock;
+ struct file_lock_context *flctx = inode->i_flctx;
+ unsigned int i;
int rc = 0, xid, type;
struct list_head locks_to_send, *el;
struct lock_to_push *lck, *tmp;
@@ -1137,21 +1137,17 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
xid = get_xid();
- spin_lock(&inode->i_lock);
- cifs_for_each_lock(inode, before) {
- if ((*before)->fl_flags & FL_POSIX)
- count++;
- }
- spin_unlock(&inode->i_lock);
+ if (!flctx)
+ goto out;
INIT_LIST_HEAD(&locks_to_send);
/*
- * Allocating count locks is enough because no FL_POSIX locks can be
- * added to the list while we are holding cinode->lock_sem that
+ * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks
+ * can be added to the list while we are holding cinode->lock_sem that
* protects locking operations of this inode.
*/
- for (; i < count; i++) {
+ for (i = 0; i < flctx->flc_posix_cnt; i++) {
lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
if (!lck) {
rc = -ENOMEM;
@@ -1161,11 +1157,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
}
el = locks_to_send.next;
- spin_lock(&inode->i_lock);
- cifs_for_each_lock(inode, before) {
- flock = *before;
- if ((flock->fl_flags & FL_POSIX) == 0)
- continue;
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
if (el == &locks_to_send) {
/*
* The list ended. We don't have enough allocated
@@ -1185,9 +1178,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
lck->length = length;
lck->type = type;
lck->offset = flock->fl_start;
- el = el->next;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
int stored_rc;
@@ -3244,7 +3236,6 @@ static struct vm_operations_struct cifs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = cifs_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 45cb59b..8b7898b 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
}
src_inode = file_inode(src_file.file);
+ rc = -EINVAL;
+ if (S_ISDIR(src_inode->i_mode))
+ goto out_fput;
/*
* Note: cifs case is easier than btrfs since server responsible for
* checks for proper open modes and file type and if it wants
* server could even support copy of range where source = target
*/
-
- /* so we do not deadlock racing two ioctls on same files */
- if (target_inode < src_inode) {
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
- } else {
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
- }
+ lock_two_nondirectories(target_inode, src_inode);
/* determine range to clone */
rc = -EINVAL;
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
out_unlock:
/* although unlocking in the reverse order from locking is not
strictly necessary here it is a little cleaner to be consistent */
- if (target_inode < src_inode) {
- mutex_unlock(&src_inode->i_mutex);
- mutex_unlock(&target_inode->i_mutex);
- } else {
- mutex_unlock(&target_inode->i_mutex);
- mutex_unlock(&src_inode->i_mutex);
- }
+ unlock_two_nondirectories(src_inode, target_inode);
out_fput:
fdput(src_file);
out_drop_write:
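
lock_two_nondirectories() encapsulates exactly the address-ordered locking the deleted lines open-coded: always take the lower-addressed inode's mutex first, so two racing ioctls on the same pair of files agree on the order and cannot deadlock. A generic userspace sketch of the idea (lock_pair is a hypothetical helper, not the VFS implementation):

#include <pthread.h>
#include <stdint.h>

/* Acquire two mutexes in a globally consistent (address) order, so no
 * thread can hold one while waiting on the other in reverse. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {			/* same object: take the lock once */
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* order by address */
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

int main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	lock_pair(&m1, &m2);	/* lock_pair(&m2, &m1) takes the same order */
	pthread_mutex_unlock(&m1);
	pthread_mutex_unlock(&m2);
	return 0;
}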
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6c15663..a4232ec 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
}
rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
- memset(wpwd, 0, 129 * sizeof(__le16));
+ memzero_explicit(wpwd, sizeof(wpwd));
return rc;
}
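
The one-line change above fixes two things: memset() on a buffer that is dead afterwards may be removed entirely by dead-store elimination, leaving the password material in memory, and "129 * sizeof(__le16)" duplicated the array length instead of deriving it from the declaration. A sketch of the idea (the compiler barrier here is an assumption mirroring what the kernel's memzero_explicit() achieves, not its exact code):

#include <string.h>

static void memzero_explicit_sketch(void *s, size_t count)
{
	memset(s, 0, count);
	/* Barrier: tell the compiler the zeroed bytes are "used", so the
	 * memset above cannot be elided as a dead store. */
	__asm__ __volatile__("" : : "r" (s) : "memory");
}

int main(void)
{
	unsigned short wpwd[129];	/* stand-in for the __le16 buffer */

	/* ... hash the password held in wpwd ... */
	memzero_explicit_sketch(wpwd, sizeof(wpwd));	/* tracks the declaration */
	return 0;
}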
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index e7cfbaf..1e6e227 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -56,13 +56,8 @@ static int send_data(struct sk_buff *skb)
{
struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
void *data = genlmsg_data(genlhdr);
- int rv;
- rv = genlmsg_end(skb, data);
- if (rv < 0) {
- nlmsg_free(skb);
- return rv;
- }
+ genlmsg_end(skb, data);
return genlmsg_unicast(&init_net, skb, listener_nlportid);
}
diff --git a/fs/efivarfs/Kconfig b/fs/efivarfs/Kconfig
index 367bbb1..c2499ef 100644
--- a/fs/efivarfs/Kconfig
+++ b/fs/efivarfs/Kconfig
@@ -1,6 +1,7 @@
config EFIVAR_FS
tristate "EFI Variable filesystem"
depends on EFI
+ default m
help
efivarfs is a replacement filesystem for the old EFI
variable support via sysfs, as it doesn't suffer from the
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 6dad117..ddbce42 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -140,7 +140,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
name[len] = '-';
- efi_guid_unparse(&entry->var.VendorGuid, name + len + 1);
+ efi_guid_to_str(&entry->var.VendorGuid, name + len + 1);
name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 9b4e7d7..d4dbf3c 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -466,6 +466,8 @@ static void ext3_put_super (struct super_block * sb)
}
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
+ mutex_destroy(&sbi->s_orphan_lock);
+ mutex_destroy(&sbi->s_resize_lock);
kfree(sbi);
}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8131be8..7cb5923 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -195,7 +195,6 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = ext4_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ad88e60..64c39c7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1046,10 +1046,7 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
struct path *path);
-static int ext4_quota_on_sysfile(struct super_block *sb, int type,
- int format_id);
static int ext4_quota_off(struct super_block *sb, int type);
-static int ext4_quota_off_sysfile(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
@@ -1084,16 +1081,6 @@ static const struct quotactl_ops ext4_qctl_operations = {
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
};
-
-static const struct quotactl_ops ext4_qctl_sysfile_operations = {
- .quota_on_meta = ext4_quota_on_sysfile,
- .quota_off = ext4_quota_off_sysfile,
- .quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
- .set_info = dquot_set_dqinfo,
- .get_dqblk = dquot_get_dqblk,
- .set_dqblk = dquot_set_dqblk
-};
#endif
static const struct super_operations ext4_sops = {
@@ -3935,7 +3922,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_QUOTA
sb->dq_op = &ext4_quota_operations;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
- sb->s_qcop = &ext4_qctl_sysfile_operations;
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
else
sb->s_qcop = &ext4_qctl_operations;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
@@ -5288,21 +5275,6 @@ static int ext4_enable_quotas(struct super_block *sb)
return 0;
}
-/*
- * quota_on function that is used when QUOTA feature is set.
- */
-static int ext4_quota_on_sysfile(struct super_block *sb, int type,
- int format_id)
-{
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
- return -EINVAL;
-
- /*
- * USAGE was enabled at mount time. Only need to enable LIMITS now.
- */
- return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED);
-}
-
static int ext4_quota_off(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
@@ -5329,18 +5301,6 @@ out:
return dquot_quota_off(sb, type);
}
-/*
- * quota_off function that is used when QUOTA feature is set.
- */
-static int ext4_quota_off_sysfile(struct super_block *sb, int type)
-{
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
- return -EINVAL;
-
- /* Disable only the limits. */
- return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
-}
-
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
* itself serializes the operations (and no one else should touch the files)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3c27e0e..5674ba1 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -92,7 +92,6 @@ static const struct vm_operations_struct f2fs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = f2fs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 19d80b8..c01ec3b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2062,7 +2062,6 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = fuse_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3088e2a..7b31430 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -73,7 +73,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
BUG_ON(name == NULL);
- if (acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
+ if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
return -E2BIG;
if (type == ACL_TYPE_ACCESS) {
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c5a34f0..6371192 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1896,7 +1896,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
if (ht == NULL)
- ht = vzalloc(size);
+ ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
+ PAGE_KERNEL);
if (!ht)
return -ENOMEM;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 6e600ab..ec9c2d3 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -498,7 +498,6 @@ static const struct vm_operations_struct gfs2_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = gfs2_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
/**
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 08ea717..f42dffb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -173,19 +173,14 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
spin_unlock(&lru_lock);
}
-static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
+ spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
clear_bit(GLF_LRU, &gl->gl_flags);
}
-}
-
-static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
-{
- spin_lock(&lru_lock);
- __gfs2_glock_remove_from_lru(gl);
spin_unlock(&lru_lock);
}
@@ -205,9 +200,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
lockref_mark_dead(&gl->gl_lockref);
- spin_lock(&lru_lock);
- __gfs2_glock_remove_from_lru(gl);
- spin_unlock(&lru_lock);
+ gfs2_glock_remove_from_lru(gl);
spin_unlock(&gl->gl_lockref.lock);
spin_lock_bucket(gl->gl_hash);
hlist_bl_del_rcu(&gl->gl_list);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 9054002..73c72253 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -543,10 +543,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
}
error = gfs2_dir_add(&dip->i_inode, name, ip, da);
- if (error)
- goto fail_end_trans;
-fail_end_trans:
gfs2_trans_end(sdp);
fail_ipreserv:
gfs2_inplace_release(dip);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c8b148b..3e193cb 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 change, struct gfs2_quota_data *qd,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct inode *inode = &ip->i_inode;
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
be64_add_cpu(&q.qu_value, change);
qd->qd_qb.qb_value = q.qu_value;
if (fdq) {
- if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPC_SOFT) {
+ q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_warn = q.qu_warn;
}
- if (fdq->d_fieldmask & FS_DQ_BHARD) {
- q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPC_HARD) {
+ q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_limit = q.qu_limit;
}
- if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
- q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPACE) {
+ q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_value = q.qu_value;
}
}
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
}
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
struct gfs2_holder q_gh;
int error;
- memset(fdq, 0, sizeof(struct fs_disk_quota));
+ memset(fdq, 0, sizeof(*fdq));
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
goto out;
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
- fdq->d_version = FS_DQUOT_VERSION;
- fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
- fdq->d_id = from_kqid_munged(current_user_ns(), qid);
- fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
- fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
- fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+ fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+ fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+ fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
gfs2_glock_dq_uninit(&q_gh);
out:
@@ -1536,10 +1533,10 @@ out:
}
/* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
goto out_i;
/* If nothing has changed, this is a no-op */
- if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
- ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
- fdq->d_fieldmask ^= FS_DQ_BSOFT;
+ if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+ ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+ fdq->d_fieldmask ^= QC_SPC_SOFT;
- if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
- ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
- fdq->d_fieldmask ^= FS_DQ_BHARD;
+ if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+ ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+ fdq->d_fieldmask ^= QC_SPC_HARD;
- if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
- ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
- fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+ if ((fdq->d_fieldmask & QC_SPACE) &&
+ ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+ fdq->d_fieldmask ^= QC_SPACE;
if (fdq->d_fieldmask == 0)
goto out_i;
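
The shift change throughout this file encodes a unit change: the old fs_disk_quota interface carried space limits in 512-byte basic blocks (hence sd_fsb2bb_shift), while the new qc_dqblk interface carries them in bytes, so GFS2 now converts between filesystem blocks and bytes with the block-size shift. A worked example with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int sb_bsize_shift = 12;	/* 4 KiB filesystem blocks */
	uint64_t qb_limit = 1024;		/* on-disk hard limit, in fs blocks */

	/* get_dqblk direction: fs blocks -> bytes */
	uint64_t d_spc_hardlimit = qb_limit << sb_bsize_shift;
	assert(d_spc_hardlimit == 4ULL * 1024 * 1024);	/* 4 MiB */

	/* set_dqblk direction: bytes -> fs blocks (the inverse shift) */
	assert((d_spc_hardlimit >> sb_bsize_shift) == qb_limit);
	return 0;
}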
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 573bd3b..1b64577 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -439,7 +439,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
ls->ls_recover_jid_done = jid;
ls->ls_recover_jid_status = message;
- sprintf(env_jid, "JID=%d", jid);
+ sprintf(env_jid, "JID=%u", jid);
sprintf(env_status, "RECOVERY=%s",
message == LM_RD_SUCCESS ? "Done" : "Failed");
kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 3ab566b..ae8e881 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -96,7 +96,7 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
struct super_block *sb = sdp->sd_vfs;
int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;
- return snprintf(buf, PAGE_SIZE, "%u\n", frozen);
+ return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
diff --git a/fs/inode.c b/fs/inode.c
index e4e8caa..b787157 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -181,7 +181,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
#ifdef CONFIG_FSNOTIFY
inode->i_fsnotify_mask = 0;
#endif
-
+ inode->i_flctx = NULL;
this_cpu_inc(nr_inodes);
return 0;
@@ -224,6 +224,7 @@ void __destroy_inode(struct inode *inode)
BUG_ON(inode_has_buffers(inode));
security_inode_free(inode);
fsnotify_inode_delete(inode);
+ locks_free_lock_context(inode->i_flctx);
if (!inode->i_nlink) {
WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
atomic_long_dec(&inode->i_sb->s_remove_count);
@@ -342,7 +343,6 @@ void address_space_init_once(struct address_space *mapping)
INIT_LIST_HEAD(&mapping->private_list);
spin_lock_init(&mapping->private_lock);
mapping->i_mmap = RB_ROOT;
- INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 214c3c1..5d01d26 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -379,6 +379,11 @@ int __generic_block_fiemap(struct inode *inode,
past_eof = true;
}
cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
} while (1);
/* If ret is 1 then we just hit the end of the extent array */
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index 01e1ee7..005a15c 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -2,6 +2,7 @@
* linux/fs/isofs/util.c
*/
+#include <linux/time.h>
#include "isofs.h"
/*
@@ -17,9 +18,9 @@
int iso_date(char * p, int flag)
{
int year, month, day, hour, minute, second, tz;
- int crtime, days, i;
+ int crtime;
- year = p[0] - 70;
+ year = p[0];
month = p[1];
day = p[2];
hour = p[3];
@@ -31,18 +32,7 @@ int iso_date(char * p, int flag)
if (year < 0) {
crtime = 0;
} else {
- int monlen[12] = {31,28,31,30,31,30,31,31,30,31,30,31};
-
- days = year * 365;
- if (year > 2)
- days += (year+1) / 4;
- for (i = 1; i < month; i++)
- days += monlen[i-1];
- if (((year+2) % 4) == 0 && month > 2)
- days++;
- days += day - 1;
- crtime = ((((days * 24) + hour) * 60 + minute) * 60)
- + second;
+ crtime = mktime64(year+1900, month, day, hour, minute, second);
/* sign extend */
if (tz & 0x80)
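
mktime64() replaces the hand-rolled leap-day arithmetic with the kernel's standard Gregorian-to-epoch conversion, and its 64-bit result survives dates past 2038. For reference, a userspace sketch of the same algorithm the kernel's mktime64() uses (month is 1-12, year is the full Gregorian year):

#include <assert.h>
#include <stdint.h>

static int64_t mktime64_sketch(unsigned int year, unsigned int mon,
			       unsigned int day, unsigned int hour,
			       unsigned int min, unsigned int sec)
{
	/* 1..12 -> 11,12,1..10: puts Feb last since it has the leap day */
	if (0 >= (int)(mon -= 2)) {
		mon += 12;
		year -= 1;
	}
	return ((((int64_t)(year/4 - year/100 + year/400 +
			    367*mon/12 + day) +
		  year*365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
	/* 1 Jan 2000 00:00:00 UTC */
	assert(mktime64_sketch(2000, 1, 1, 0, 0, 0) == 946684800);
	return 0;
}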
diff --git a/fs/jfs/endian24.h b/fs/jfs/endian24.h
deleted file mode 100644
index fa92f7f1..0000000
--- a/fs/jfs/endian24.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) International Business Machines Corp., 2001
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _H_ENDIAN24
-#define _H_ENDIAN24
-
-/*
- * endian24.h:
- *
- * Endian conversion for 24-byte data
- *
- */
-#define __swab24(x) \
-({ \
- __u32 __x = (x); \
- ((__u32)( \
- ((__x & (__u32)0x000000ffUL) << 16) | \
- (__x & (__u32)0x0000ff00UL) | \
- ((__x & (__u32)0x00ff0000UL) >> 16) )); \
-})
-
-#if (defined(__KERNEL__) && defined(__LITTLE_ENDIAN)) || (defined(__BYTE_ORDER) && (__BYTE_ORDER == __LITTLE_ENDIAN))
- #define __cpu_to_le24(x) ((__u32)(x))
- #define __le24_to_cpu(x) ((__u32)(x))
-#else
- #define __cpu_to_le24(x) __swab24(x)
- #define __le24_to_cpu(x) __swab24(x)
-#endif
-
-#ifdef __KERNEL__
- #define cpu_to_le24 __cpu_to_le24
- #define le24_to_cpu __le24_to_cpu
-#endif
-
-#endif /* !_H_ENDIAN24 */
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 984c2bb..d88576e 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -1040,8 +1040,8 @@ static int dtSplitUp(tid_t tid,
pxdlist.maxnpxd = 1;
pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
- PXDaddress(pxd, nxaddr)
- PXDlength(pxd, xlen + n);
+ PXDaddress(pxd, nxaddr);
+ PXDlength(pxd, xlen + n);
split->pxdlist = &pxdlist;
if ((rc = dtExtendPage(tid, ip, split, btstack))) {
nxaddr = addressPXD(pxd);
diff --git a/fs/jfs/jfs_types.h b/fs/jfs/jfs_types.h
index 43ea371..8f602dc 100644
--- a/fs/jfs/jfs_types.h
+++ b/fs/jfs/jfs_types.h
@@ -30,8 +30,6 @@
#include <linux/types.h>
#include <linux/nls.h>
-#include "endian24.h"
-
/*
* transaction and lock id's
*
@@ -59,26 +57,42 @@ struct timestruc_t {
/*
* physical xd (pxd)
+ *
+ * The leftmost 24 bits of len_addr are the extent length.
+ * The rightmost 8 bits of len_addr are the most significant bits of
+ * the extent address.
*/
typedef struct {
- unsigned len:24;
- unsigned addr1:8;
+ __le32 len_addr;
__le32 addr2;
} pxd_t;
/* xd_t field construction */
-#define PXDlength(pxd, length32) ((pxd)->len = __cpu_to_le24(length32))
-#define PXDaddress(pxd, address64)\
-{\
- (pxd)->addr1 = ((s64)address64) >> 32;\
- (pxd)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
+static inline void PXDlength(pxd_t *pxd, __u32 len)
+{
+ pxd->len_addr = (pxd->len_addr & cpu_to_le32(~0xffffff)) |
+ cpu_to_le32(len & 0xffffff);
+}
+
+static inline void PXDaddress(pxd_t *pxd, __u64 addr)
+{
+ pxd->len_addr = (pxd->len_addr & cpu_to_le32(0xffffff)) |
+ cpu_to_le32((addr >> 32)<<24);
+ pxd->addr2 = cpu_to_le32(addr & 0xffffffff);
}
/* xd_t field extraction */
-#define lengthPXD(pxd) __le24_to_cpu((pxd)->len)
-#define addressPXD(pxd)\
- ( ((s64)((pxd)->addr1)) << 32 | __le32_to_cpu((pxd)->addr2))
+static inline __u32 lengthPXD(pxd_t *pxd)
+{
+ return le32_to_cpu((pxd)->len_addr) & 0xffffff;
+}
+
+static inline __u64 addressPXD(pxd_t *pxd)
+{
+ __u64 n = le32_to_cpu(pxd->len_addr) & ~0xffffff;
+ return (n << 8) + le32_to_cpu(pxd->addr2);
+}
#define MAXTREEHEIGHT 8
/* pxd list */
@@ -93,12 +107,10 @@ struct pxdlist {
* data extent descriptor (dxd)
*/
typedef struct {
- unsigned flag:8; /* 1: flags */
- unsigned rsrvd:24;
+ __u8 flag; /* 1: flags */
+ __u8 rsrvd[3];
__le32 size; /* 4: size in byte */
- unsigned len:24; /* 3: length in unit of fsblksize */
- unsigned addr1:8; /* 1: address in unit of fsblksize */
- __le32 addr2; /* 4: address in unit of fsblksize */
+ pxd_t loc; /* 8: address and length in unit of fsblksize */
} dxd_t; /* - 16 - */
/* dxd_t flags */
@@ -109,12 +121,11 @@ typedef struct {
#define DXD_CORRUPT 0x08 /* Inconsistency detected */
/* dxd_t field construction
- * Conveniently, the PXD macros work for DXD
*/
-#define DXDlength PXDlength
-#define DXDaddress PXDaddress
-#define lengthDXD lengthPXD
-#define addressDXD addressPXD
+#define DXDlength(dxd, len) PXDlength(&(dxd)->loc, len)
+#define DXDaddress(dxd, addr) PXDaddress(&(dxd)->loc, addr)
+#define lengthDXD(dxd) lengthPXD(&(dxd)->loc)
+#define addressDXD(dxd) addressPXD(&(dxd)->loc)
#define DXDsize(dxd, size32) ((dxd)->size = cpu_to_le32(size32))
#define sizeDXD(dxd) le32_to_cpu((dxd)->size)
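
The new helpers replace endianness-fragile C bitfields with explicit masks: the 24-bit extent length lives in the low bits of len_addr, the top byte holds bits 39-32 of the 40-bit block address, and addr2 holds bits 31-0. A userspace round-trip sketch of the packing (plain uint32_t stand-ins; the kernel wraps each access in cpu_to_le32/le32_to_cpu):

#include <assert.h>
#include <stdint.h>

typedef struct {
	uint32_t len_addr;	/* low 24 bits: length; high 8 bits: addr 39..32 */
	uint32_t addr2;		/* addr bits 31..0 */
} pxd_sketch_t;

static void pxd_set(pxd_sketch_t *pxd, uint64_t addr, uint32_t len)
{
	pxd->len_addr = ((uint32_t)(addr >> 32) << 24) | (len & 0xffffff);
	pxd->addr2 = (uint32_t)(addr & 0xffffffff);
}

static uint64_t pxd_addr(const pxd_sketch_t *pxd)
{
	uint64_t n = pxd->len_addr & ~0xffffffu;	/* addr 39..32, at bit 24 */
	return (n << 8) + pxd->addr2;			/* shift up to bit 32 */
}

static uint32_t pxd_len(const pxd_sketch_t *pxd)
{
	return pxd->len_addr & 0xffffff;
}

int main(void)
{
	pxd_sketch_t pxd;

	pxd_set(&pxd, 0x123456789aULL, 1000);	/* 40-bit address, 24-bit length */
	assert(pxd_addr(&pxd) == 0x123456789aULL);
	assert(pxd_len(&pxd) == 1000);
	return 0;
}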
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
index 08c0c74..1e09879 100644
--- a/fs/jfs/jfs_xtree.h
+++ b/fs/jfs/jfs_xtree.h
@@ -29,13 +29,11 @@
* extent allocation descriptor (xad)
*/
typedef struct xad {
- unsigned flag:8; /* 1: flag */
- unsigned rsvrd:16; /* 2: reserved */
- unsigned off1:8; /* 1: offset in unit of fsblksize */
- __le32 off2; /* 4: offset in unit of fsblksize */
- unsigned len:24; /* 3: length in unit of fsblksize */
- unsigned addr1:8; /* 1: address in unit of fsblksize */
- __le32 addr2; /* 4: address in unit of fsblksize */
+ __u8 flag; /* 1: flag */
+ __u8 rsvrd[2]; /* 2: reserved */
+ __u8 off1; /* 1: offset in unit of fsblksize */
+ __le32 off2; /* 4: offset in unit of fsblksize */
+ pxd_t loc; /* 8: length and address in unit of fsblksize */
} xad_t; /* (16) */
#define MAXXLEN ((1 << 24) - 1)
@@ -49,19 +47,14 @@ typedef struct xad {
(xad)->off1 = ((u64)offset64) >> 32;\
(xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\
}
-#define XADaddress(xad, address64)\
-{\
- (xad)->addr1 = ((u64)address64) >> 32;\
- (xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
-}
-#define XADlength(xad, length32) (xad)->len = __cpu_to_le24(length32)
+#define XADaddress(xad, address64) PXDaddress(&(xad)->loc, address64)
+#define XADlength(xad, length32) PXDlength(&(xad)->loc, length32)
/* xad_t field extraction */
#define offsetXAD(xad)\
( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2))
-#define addressXAD(xad)\
- ( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2))
-#define lengthXAD(xad) __le24_to_cpu((xad)->len)
+#define addressXAD(xad) addressPXD(&(xad)->loc)
+#define lengthXAD(xad) lengthPXD(&(xad)->loc)
/* xad list */
struct xadlist {
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 16c3a955..5d30c56 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -619,8 +619,7 @@ out_mount_failed:
iput(sbi->direct_inode);
sbi->direct_inode = NULL;
out_unload:
- if (sbi->nls_tab)
- unload_nls(sbi->nls_tab);
+ unload_nls(sbi->nls_tab);
out_kfree:
kfree(sbi);
return ret;
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 1cc6ec5..47a32b6 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -65,7 +65,7 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
return (struct sockaddr *)&nsm->sm_addr;
}
-static struct rpc_clnt *nsm_create(struct net *net)
+static struct rpc_clnt *nsm_create(struct net *net, const char *nodename)
{
struct sockaddr_in sin = {
.sin_family = AF_INET,
@@ -77,6 +77,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
.address = (struct sockaddr *)&sin,
.addrsize = sizeof(sin),
.servername = "rpc.statd",
+ .nodename = nodename,
.program = &nsm_program,
.version = NSM_VERSION,
.authflavor = RPC_AUTH_NULL,
@@ -102,7 +103,7 @@ out:
return clnt;
}
-static struct rpc_clnt *nsm_client_get(struct net *net)
+static struct rpc_clnt *nsm_client_get(struct net *net, const char *nodename)
{
struct rpc_clnt *clnt, *new;
struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -111,7 +112,7 @@ static struct rpc_clnt *nsm_client_get(struct net *net)
if (clnt != NULL)
goto out;
- clnt = new = nsm_create(net);
+ clnt = new = nsm_create(net, nodename);
if (IS_ERR(clnt))
goto out;
@@ -190,19 +191,23 @@ int nsm_monitor(const struct nlm_host *host)
struct nsm_res res;
int status;
struct rpc_clnt *clnt;
+ const char *nodename = NULL;
dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
if (nsm->sm_monitored)
return 0;
+ if (host->h_rpcclnt)
+ nodename = host->h_rpcclnt->cl_nodename;
+
/*
* Choose whether to record the caller_name or IP address of
* this peer in the local rpc.statd's database.
*/
nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
- clnt = nsm_client_get(host->net);
+ clnt = nsm_client_get(host->net, nodename);
if (IS_ERR(clnt)) {
status = PTR_ERR(clnt);
dprintk("lockd: failed to create NSM upcall transport, "
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 5659874..5581e02 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -57,8 +57,8 @@ static DEFINE_SPINLOCK(nlm_blocked_lock);
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
/*
- * We can get away with a static buffer because we're only
- * called with BKL held.
+ * We can get away with a static buffer because this is only called
+ * from lockd, which is single-threaded.
*/
static char buf[2*NLM_MAXCOOKIELEN+1];
unsigned int i, len = sizeof(buf);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index d12ff4e..665ef5a 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -164,12 +164,15 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
struct nlm_host *lockhost;
+ if (!flctx || list_empty_careful(&flctx->flc_posix))
+ return 0;
again:
file->f_locks = 0;
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
if (fl->fl_lmops != &nlmsvc_lock_operations)
continue;
@@ -180,7 +183,7 @@ again:
if (match(lockhost, host)) {
struct file_lock lock = *fl;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
lock.fl_type = F_UNLCK;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
@@ -192,7 +195,7 @@ again:
goto again;
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
return 0;
}
@@ -223,18 +226,21 @@ nlm_file_inuse(struct nlm_file *file)
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
return 1;
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl; fl = fl->fl_next) {
- if (fl->fl_lmops == &nlmsvc_lock_operations) {
- spin_unlock(&inode->i_lock);
- return 1;
+ if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ if (fl->fl_lmops == &nlmsvc_lock_operations) {
+ spin_unlock(&flctx->flc_lock);
+ return 1;
+ }
}
+ spin_unlock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
file->f_locks = 0;
return 0;
}
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 9340e7e..5b651da 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -95,14 +95,6 @@ nlm_decode_fh(__be32 *p, struct nfs_fh *f)
return p + XDR_QUADLEN(NFS2_FHSIZE);
}
-static inline __be32 *
-nlm_encode_fh(__be32 *p, struct nfs_fh *f)
-{
- *p++ = htonl(NFS2_FHSIZE);
- memcpy(p, f->data, NFS2_FHSIZE);
- return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
/*
* Encode and decode owner handle
*/
diff --git a/fs/locks.c b/fs/locks.c
index 59e2f90..4753218 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -137,7 +137,7 @@
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
-#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG))
+#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
static bool lease_breaking(struct file_lock *fl)
@@ -157,14 +157,11 @@ static int target_leasetype(struct file_lock *fl)
int leases_enable = 1;
int lease_break_time = 45;
-#define for_each_lock(inode, lockp) \
- for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
-
/*
* The global file_lock_list is only used for displaying /proc/locks, so we
* keep a list on each CPU, with each list protected by its own spinlock via
* the file_lock_lglock. Note that alterations to the list also require that
- * the relevant i_lock is held.
+ * the relevant flc_lock is held.
*/
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -192,21 +189,68 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
* contrast to those that are acting as records of acquired locks).
*
* Note that when we acquire this lock in order to change the above fields,
- * we often hold the i_lock as well. In certain cases, when reading the fields
+ * we often hold the flc_lock as well. In certain cases, when reading the fields
* protected by this lock, we can skip acquiring it iff we already hold the
- * i_lock.
+ * flc_lock.
*
* In particular, adding an entry to the fl_block list requires that you hold
- * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
- * an entry from the list however only requires the file_lock_lock.
+ * both the flc_lock and the blocked_lock_lock (acquired in that order).
+ * Deleting an entry from the list however only requires the blocked_lock_lock.
*/
static DEFINE_SPINLOCK(blocked_lock_lock);
+static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
+static struct file_lock_context *
+locks_get_lock_context(struct inode *inode)
+{
+ struct file_lock_context *new;
+
+ if (likely(inode->i_flctx))
+ goto out;
+
+ new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ spin_lock_init(&new->flc_lock);
+ INIT_LIST_HEAD(&new->flc_flock);
+ INIT_LIST_HEAD(&new->flc_posix);
+ INIT_LIST_HEAD(&new->flc_lease);
+
+ /*
+ * Assign the pointer if it's not already assigned. If it is, then
+ * free the context we just allocated.
+ */
+ spin_lock(&inode->i_lock);
+ if (likely(!inode->i_flctx)) {
+ inode->i_flctx = new;
+ new = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (new)
+ kmem_cache_free(flctx_cache, new);
+out:
+ return inode->i_flctx;
+}
+
+void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+ if (ctx) {
+ WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
+ WARN_ON_ONCE(!list_empty(&ctx->flc_posix));
+ WARN_ON_ONCE(!list_empty(&ctx->flc_lease));
+ kmem_cache_free(flctx_cache, ctx);
+ }
+}
+
static void locks_init_lock_heads(struct file_lock *fl)
{
INIT_HLIST_NODE(&fl->fl_link);
+ INIT_LIST_HEAD(&fl->fl_list);
INIT_LIST_HEAD(&fl->fl_block);
init_waitqueue_head(&fl->fl_wait);
}
@@ -243,6 +287,7 @@ EXPORT_SYMBOL_GPL(locks_release_private);
void locks_free_lock(struct file_lock *fl)
{
BUG_ON(waitqueue_active(&fl->fl_wait));
+ BUG_ON(!list_empty(&fl->fl_list));
BUG_ON(!list_empty(&fl->fl_block));
BUG_ON(!hlist_unhashed(&fl->fl_link));
@@ -257,8 +302,8 @@ locks_dispose_list(struct list_head *dispose)
struct file_lock *fl;
while (!list_empty(dispose)) {
- fl = list_first_entry(dispose, struct file_lock, fl_block);
- list_del_init(&fl->fl_block);
+ fl = list_first_entry(dispose, struct file_lock, fl_list);
+ list_del_init(&fl->fl_list);
locks_free_lock(fl);
}
}
@@ -513,7 +558,7 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
return fl1->fl_owner == fl2->fl_owner;
}
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
lg_local_lock(&file_lock_lglock);
@@ -522,12 +567,12 @@ static void locks_insert_global_locks(struct file_lock *fl)
lg_local_unlock(&file_lock_lglock);
}
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
/*
* Avoid taking lock if already unhashed. This is safe since this check
- * is done while holding the i_lock, and new insertions into the list
+ * is done while holding the flc_lock, and new insertions into the list
* also require that it be held.
*/
if (hlist_unhashed(&fl->fl_link))
@@ -579,10 +624,10 @@ static void locks_delete_block(struct file_lock *waiter)
* the order they blocked. The documentation doesn't require this but
* it seems like the reasonable thing to do.
*
- * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
- * list itself is protected by the blocked_lock_lock, but by ensuring that the
- * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
- * in some cases when we see that the fl_block list is empty.
+ * Must be called with both the flc_lock and blocked_lock_lock held. The
+ * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
+ * that the flc_lock is also held on insertions we can avoid taking the
+ * blocked_lock_lock in some cases when we see that the fl_block list is empty.
*/
static void __locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
@@ -594,7 +639,7 @@ static void __locks_insert_block(struct file_lock *blocker,
locks_insert_global_blocked(waiter);
}
-/* Must be called with i_lock held. */
+/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
@@ -606,15 +651,15 @@ static void locks_insert_block(struct file_lock *blocker,
/*
* Wake up processes blocked waiting for blocker.
*
- * Must be called with the inode->i_lock held!
+ * Must be called with the inode->flc_lock held!
*/
static void locks_wake_up_blocks(struct file_lock *blocker)
{
/*
* Avoid taking global lock if list is empty. This is safe since new
- * blocked requests are only added to the list under the i_lock, and
- * the i_lock is always held here. Note that removal from the fl_block
- * list does not require the i_lock, so we must recheck list_empty()
+ * blocked requests are only added to the list under the flc_lock, and
+ * the flc_lock is always held here. Note that removal from the fl_block
+ * list does not require the flc_lock, so we must recheck list_empty()
* after acquiring the blocked_lock_lock.
*/
if (list_empty(&blocker->fl_block))
@@ -635,63 +680,36 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
spin_unlock(&blocked_lock_lock);
}
-/* Insert file lock fl into an inode's lock list at the position indicated
- * by pos. At the same time add the lock to the global file lock list.
- *
- * Must be called with the i_lock held!
- */
-static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
+static void
+locks_insert_lock_ctx(struct file_lock *fl, int *counter,
+ struct list_head *before)
{
fl->fl_nspid = get_pid(task_tgid(current));
-
- /* insert into file's list */
- fl->fl_next = *pos;
- *pos = fl;
-
+ list_add_tail(&fl->fl_list, before);
+ ++*counter;
locks_insert_global_locks(fl);
}
-/**
- * locks_delete_lock - Delete a lock and then free it.
- * @thisfl_p: pointer that points to the fl_next field of the previous
- * inode->i_flock list entry
- *
- * Unlink a lock from all lists and free the namespace reference, but don't
- * free it yet. Wake up processes that are blocked waiting for this lock and
- * notify the FS that the lock has been cleared.
- *
- * Must be called with the i_lock held!
- */
-static void locks_unlink_lock(struct file_lock **thisfl_p)
+static void
+locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
{
- struct file_lock *fl = *thisfl_p;
-
locks_delete_global_locks(fl);
-
- *thisfl_p = fl->fl_next;
- fl->fl_next = NULL;
-
+ list_del_init(&fl->fl_list);
+ --*counter;
if (fl->fl_nspid) {
put_pid(fl->fl_nspid);
fl->fl_nspid = NULL;
}
-
locks_wake_up_blocks(fl);
}
-/*
- * Unlink a lock from all lists and free it.
- *
- * Must be called with i_lock held!
- */
-static void locks_delete_lock(struct file_lock **thisfl_p,
- struct list_head *dispose)
+static void
+locks_delete_lock_ctx(struct file_lock *fl, int *counter,
+ struct list_head *dispose)
{
- struct file_lock *fl = *thisfl_p;
-
- locks_unlink_lock(thisfl_p);
+ locks_unlink_lock_ctx(fl, counter);
if (dispose)
- list_add(&fl->fl_block, dispose);
+ list_add(&fl->fl_list, dispose);
else
locks_free_lock(fl);
}
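
locks_delete_lock_ctx() either frees the lock immediately or parks it on the caller's dispose list, and locks_dispose_list() frees everything only after the spinlock has been dropped. A standalone sketch of that unlink-now, free-later pattern (illustrative types, a pthread mutex standing in for flc_lock):

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	struct node *dispose_next;
};

static void delete_matching(struct node **head, pthread_mutex_t *lock,
			    int (*match)(const struct node *))
{
	struct node *dispose = NULL;

	pthread_mutex_lock(lock);
	for (struct node **p = head; *p; ) {
		struct node *n = *p;

		if (match(n)) {
			*p = n->next;			/* unlink under the lock */
			n->dispose_next = dispose;	/* park, don't free yet */
			dispose = n;
		} else {
			p = &n->next;
		}
	}
	pthread_mutex_unlock(lock);

	while (dispose) {		/* free after the lock is dropped */
		struct node *n = dispose;

		dispose = n->dispose_next;
		free(n);
	}
}
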
@@ -746,22 +764,27 @@ void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
+ struct file_lock_context *ctx;
struct inode *inode = file_inode(filp);
- spin_lock(&inode->i_lock);
- for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
- if (!IS_POSIX(cfl))
- continue;
- if (posix_locks_conflict(fl, cfl))
- break;
- }
- if (cfl) {
- locks_copy_conflock(fl, cfl);
- if (cfl->fl_nspid)
- fl->fl_pid = pid_vnr(cfl->fl_nspid);
- } else
+ ctx = inode->i_flctx;
+ if (!ctx || list_empty_careful(&ctx->flc_posix)) {
fl->fl_type = F_UNLCK;
- spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
+ if (posix_locks_conflict(fl, cfl)) {
+ locks_copy_conflock(fl, cfl);
+ if (cfl->fl_nspid)
+ fl->fl_pid = pid_vnr(cfl->fl_nspid);
+ goto out;
+ }
+ }
+ fl->fl_type = F_UNLCK;
+out:
+ spin_unlock(&ctx->flc_lock);
return;
}
EXPORT_SYMBOL(posix_test_lock);
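
posix_test_lock() is the backend for fcntl(F_GETLK): it copies the first conflicting lock into the caller's struct, or sets l_type to F_UNLCK when nothing conflicts. A runnable userspace illustration (the path is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* "would a write lock conflict?" */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 means "to end of file" */
	};
	int fd = open("/tmp/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || fcntl(fd, F_GETLK, &fl) < 0) {
		perror("F_GETLK");
		return 1;
	}
	if (fl.l_type == F_UNLCK)
		printf("no conflicting lock\n");
	else
		printf("conflicts with a lock held by pid %ld\n",
		       (long)fl.l_pid);
	close(fd);
	return 0;
}
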
@@ -845,34 +868,34 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
struct file_lock *new_fl = NULL;
- struct file_lock **before;
- struct inode * inode = file_inode(filp);
+ struct file_lock *fl;
+ struct file_lock_context *ctx;
+ struct inode *inode = file_inode(filp);
int error = 0;
- int found = 0;
+ bool found = false;
LIST_HEAD(dispose);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
new_fl = locks_alloc_lock();
if (!new_fl)
return -ENOMEM;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
- for_each_lock(inode, before) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl))
- break;
- if (IS_LEASE(fl))
- continue;
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
if (filp != fl->fl_file)
continue;
if (request->fl_type == fl->fl_type)
goto out;
- found = 1;
- locks_delete_lock(before, &dispose);
+ found = true;
+ locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose);
break;
}
@@ -887,18 +910,13 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
* give it the opportunity to lock the file.
*/
if (found) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
cond_resched();
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
}
find_conflict:
- for_each_lock(inode, before) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl))
- break;
- if (IS_LEASE(fl))
- continue;
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
if (!flock_locks_conflict(request, fl))
continue;
error = -EAGAIN;
@@ -911,12 +929,12 @@ find_conflict:
if (request->fl_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
- locks_insert_lock(before, new_fl);
+ locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock);
new_fl = NULL;
error = 0;
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
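
The found/delete/re-add sequence above is what gives flock() its documented replace semantics: a second flock() on the same open file description swaps the lock type, and the cond_resched() window means an upgrade is not atomic, so another holder can slip in between release and re-acquire. In userspace terms (sketch):

#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	flock(fd, LOCK_SH);	/* take a shared lock */
	/*
	 * Upgrade: the kernel drops the old LOCK_SH first, so this can
	 * block, and another process may grab the file in the gap.
	 */
	flock(fd, LOCK_EX);
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}
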
@@ -925,16 +943,20 @@ out:
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
- struct file_lock *fl;
+ struct file_lock *fl, *tmp;
struct file_lock *new_fl = NULL;
struct file_lock *new_fl2 = NULL;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
- struct file_lock **before;
+ struct file_lock_context *ctx;
int error;
bool added = false;
LIST_HEAD(dispose);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
@@ -948,15 +970,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
new_fl2 = locks_alloc_lock();
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
* there are any, either return error or put the request on the
* blocker's list of waiters and the global blocked_hash.
*/
if (request->fl_type != F_UNLCK) {
- for_each_lock(inode, before) {
- fl = *before;
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
- if (!IS_POSIX(fl))
- continue;
if (!posix_locks_conflict(request, fl))
@@ -986,29 +1007,25 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
if (request->fl_flags & FL_ACCESS)
goto out;
- /*
- * Find the first old lock with the same owner as the new lock.
- */
-
- before = &inode->i_flock;
-
- /* First skip locks owned by other processes. */
- while ((fl = *before) && (!IS_POSIX(fl) ||
- !posix_same_owner(request, fl))) {
- before = &fl->fl_next;
+ /* Find the first old lock with the same owner as the new lock */
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
+ if (posix_same_owner(request, fl))
+ break;
}
/* Process locks with this owner. */
- while ((fl = *before) && posix_same_owner(request, fl)) {
- /* Detect adjacent or overlapping regions (if same lock type)
- */
+ list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
+ if (!posix_same_owner(request, fl))
+ break;
+
+ /* Detect adjacent or overlapping regions (if same lock type) */
if (request->fl_type == fl->fl_type) {
/* In all comparisons of start vs end, use
* "start - 1" rather than "end + 1". If end
* is OFFSET_MAX, end + 1 will become negative.
*/
if (fl->fl_end < request->fl_start - 1)
- goto next_lock;
+ continue;
/* If the next lock in the list has entirely bigger
* addresses than the new one, insert the lock here.
*/
@@ -1029,18 +1046,18 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
else
request->fl_end = fl->fl_end;
if (added) {
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt,
+ &dispose);
continue;
}
request = fl;
added = true;
- }
- else {
+ } else {
/* Processing for different lock types is a bit
* more complex.
*/
if (fl->fl_end < request->fl_start)
- goto next_lock;
+ continue;
if (fl->fl_start > request->fl_end)
break;
if (request->fl_type == F_UNLCK)
@@ -1059,7 +1076,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
* one (This may happen several times).
*/
if (added) {
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl,
+ &ctx->flc_posix_cnt, &dispose);
continue;
}
/*
@@ -1075,15 +1093,13 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_copy_lock(new_fl, request);
request = new_fl;
new_fl = NULL;
- locks_delete_lock(before, &dispose);
- locks_insert_lock(before, request);
+ locks_insert_lock_ctx(request,
+ &ctx->flc_posix_cnt, &fl->fl_list);
+ locks_delete_lock_ctx(fl,
+ &ctx->flc_posix_cnt, &dispose);
added = true;
}
}
- /* Go on to next lock.
- */
- next_lock:
- before = &fl->fl_next;
}
/*
@@ -1108,7 +1124,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
goto out;
}
locks_copy_lock(new_fl, request);
- locks_insert_lock(before, new_fl);
+ locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt,
+ &fl->fl_list);
new_fl = NULL;
}
if (right) {
@@ -1119,7 +1136,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
left = new_fl2;
new_fl2 = NULL;
locks_copy_lock(left, right);
- locks_insert_lock(before, left);
+ locks_insert_lock_ctx(left, &ctx->flc_posix_cnt,
+ &fl->fl_list);
}
right->fl_start = request->fl_end + 1;
locks_wake_up_blocks(right);
@@ -1129,7 +1147,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_wake_up_blocks(left);
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
/*
* Free any unused locks.
*/
@@ -1199,22 +1217,29 @@ EXPORT_SYMBOL(posix_lock_file_wait);
*/
int locks_mandatory_locked(struct file *file)
{
+ int ret;
struct inode *inode = file_inode(file);
+ struct file_lock_context *ctx;
struct file_lock *fl;
+ ctx = inode->i_flctx;
+ if (!ctx || list_empty_careful(&ctx->flc_posix))
+ return 0;
+
/*
* Search the lock list for this inode for any POSIX locks.
*/
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!IS_POSIX(fl))
- continue;
+ spin_lock(&ctx->flc_lock);
+ ret = 0;
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
if (fl->fl_owner != current->files &&
- fl->fl_owner != file)
+ fl->fl_owner != file) {
+ ret = -EAGAIN;
break;
+ }
}
- spin_unlock(&inode->i_lock);
- return fl ? -EAGAIN : 0;
+ spin_unlock(&ctx->flc_lock);
+ return ret;
}
/**
@@ -1294,9 +1319,9 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
}
/* We already had a lease on this file; just change its type */
-int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
+int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
- struct file_lock *fl = *before;
+ struct file_lock_context *flctx;
int error = assign_type(fl, arg);
if (error)
@@ -1306,6 +1331,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
if (arg == F_UNLCK) {
struct file *filp = fl->fl_file;
+ flctx = file_inode(filp)->i_flctx;
f_delown(filp);
filp->f_owner.signum = 0;
fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
@@ -1313,7 +1339,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
fl->fl_fasync = NULL;
}
- locks_delete_lock(before, dispose);
+ locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose);
}
return 0;
}
@@ -1329,25 +1355,24 @@ static bool past_time(unsigned long then)
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
- struct file_lock **before;
- struct file_lock *fl;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl, *tmp;
- lockdep_assert_held(&inode->i_lock);
+ lockdep_assert_held(&ctx->flc_lock);
- before = &inode->i_flock;
- while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
trace_time_out_leases(inode, fl);
if (past_time(fl->fl_downgrade_time))
- lease_modify(before, F_RDLCK, dispose);
+ lease_modify(fl, F_RDLCK, dispose);
if (past_time(fl->fl_break_time))
- lease_modify(before, F_UNLCK, dispose);
- if (fl == *before) /* lease_modify may have freed fl */
- before = &fl->fl_next;
+ lease_modify(fl, F_UNLCK, dispose);
}
}
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
+ if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
+ return false;
if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
return false;
return locks_conflict(breaker, lease);
@@ -1356,11 +1381,12 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
+ struct file_lock_context *ctx = inode->i_flctx;
struct file_lock *fl;
- lockdep_assert_held(&inode->i_lock);
+ lockdep_assert_held(&ctx->flc_lock);
- for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) {
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (leases_conflict(fl, breaker))
return true;
}
@@ -1384,7 +1410,8 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
int error = 0;
struct file_lock *new_fl;
- struct file_lock *fl, **before;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl;
unsigned long break_time;
int want_write = (mode & O_ACCMODE) != O_RDONLY;
LIST_HEAD(dispose);
@@ -1394,7 +1421,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
return PTR_ERR(new_fl);
new_fl->fl_flags = type;
- spin_lock(&inode->i_lock);
+ /* the caller is normally expected to check that ctx is non-NULL */
+ if (!ctx) {
+ WARN_ON_ONCE(1);
+ locks_free_lock(new_fl);
+ return error;
+ }
+
+ spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
@@ -1408,9 +1441,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
break_time++; /* so that 0 means no break time */
}
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (!leases_conflict(fl, new_fl))
continue;
if (want_write) {
@@ -1419,17 +1450,17 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
fl->fl_flags |= FL_UNLOCK_PENDING;
fl->fl_break_time = break_time;
} else {
- if (lease_breaking(inode->i_flock))
+ if (lease_breaking(fl))
continue;
fl->fl_flags |= FL_DOWNGRADE_PENDING;
fl->fl_downgrade_time = break_time;
}
if (fl->fl_lmops->lm_break(fl))
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt,
+ &dispose);
}
- fl = inode->i_flock;
- if (!fl || !IS_LEASE(fl))
+ if (list_empty(&ctx->flc_lease))
goto out;
if (mode & O_NONBLOCK) {
@@ -1439,18 +1470,19 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
}
restart:
- break_time = inode->i_flock->fl_break_time;
+ fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
+ break_time = fl->fl_break_time;
if (break_time != 0)
break_time -= jiffies;
if (break_time == 0)
break_time++;
- locks_insert_block(inode->i_flock, new_fl);
+ locks_insert_block(fl, new_fl);
trace_break_lease_block(inode, new_fl);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_next, break_time);
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
if (error >= 0) {
@@ -1462,12 +1494,10 @@ restart:
time_out_leases(inode, &dispose);
if (any_leases_conflict(inode, new_fl))
goto restart;
-
error = 0;
}
-
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
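
The restart loop above is a bounded wait-then-recheck: sleep for at most break_time, then re-run the conflict test and loop if a conflicting lease is still present. The same shape in portable pthreads, as a sketch (the predicate and state are placeholders):

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Sleep until pred(state) is false; each nap is bounded by timeout_sec. */
static int wait_while(pthread_mutex_t *lock, pthread_cond_t *cond,
		      int (*pred)(void *), void *state, unsigned timeout_sec)
{
	int err = 0;

	pthread_mutex_lock(lock);
	while (pred(state)) {			/* any_leases_conflict() analogue */
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += timeout_sec;	/* break_time analogue */
		err = pthread_cond_timedwait(cond, lock, &ts);
		if (err && err != ETIMEDOUT)
			break;			/* real failure: give up */
		err = 0;			/* woken or timed out: recheck */
	}
	pthread_mutex_unlock(lock);
	return err;
}
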
@@ -1487,14 +1517,18 @@ EXPORT_SYMBOL(__break_lease);
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
bool has_lease = false;
- struct file_lock *flock;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl;
- if (inode->i_flock) {
- spin_lock(&inode->i_lock);
- flock = inode->i_flock;
- if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
- has_lease = true;
- spin_unlock(&inode->i_lock);
+ if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ spin_lock(&ctx->flc_lock);
+ if (!list_empty(&ctx->flc_lease)) {
+ fl = list_first_entry(&ctx->flc_lease,
+ struct file_lock, fl_list);
+ if (fl->fl_type == F_WRLCK)
+ has_lease = true;
+ }
+ spin_unlock(&ctx->flc_lock);
}
if (has_lease)
@@ -1532,20 +1566,22 @@ int fcntl_getlease(struct file *filp)
{
struct file_lock *fl;
struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx = inode->i_flctx;
int type = F_UNLCK;
LIST_HEAD(dispose);
- spin_lock(&inode->i_lock);
- time_out_leases(file_inode(filp), &dispose);
- for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
- fl = fl->fl_next) {
- if (fl->fl_file == filp) {
+ if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ spin_lock(&ctx->flc_lock);
+ time_out_leases(file_inode(filp), &dispose);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file != filp)
+ continue;
type = target_leasetype(fl);
break;
}
+ spin_unlock(&ctx->flc_lock);
+ locks_dispose_list(&dispose);
}
- spin_unlock(&inode->i_lock);
- locks_dispose_list(&dispose);
return type;
}
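
fcntl_getlease() serves the F_GETLEASE command: after timing out stale leases it reports the lease type held through this struct file, or F_UNLCK. A quick userspace check (F_GETLEASE is Linux-specific, hence _GNU_SOURCE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/leasefile", O_RDONLY | O_CREAT, 0644);
	int type;

	if (fd < 0)
		return 1;
	type = fcntl(fd, F_GETLEASE);
	if (type < 0) {
		perror("F_GETLEASE");
		return 1;
	}
	printf("lease held: %s\n",
	       type == F_RDLCK ? "read" :
	       type == F_WRLCK ? "write" : "none");
	close(fd);
	return 0;
}
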
@@ -1560,11 +1596,14 @@ int fcntl_getlease(struct file *filp)
* conflict with the lease we're trying to set.
*/
static int
-check_conflicting_open(const struct dentry *dentry, const long arg)
+check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
int ret = 0;
struct inode *inode = dentry->d_inode;
+ if (flags & FL_LAYOUT)
+ return 0;
+
if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
return -EAGAIN;
@@ -1578,9 +1617,10 @@ check_conflicting_open(const struct dentry *dentry, const long arg)
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
- struct file_lock *fl, **before, **my_before = NULL, *lease;
+ struct file_lock *fl, *my_fl = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct file_lock_context *ctx;
bool is_deleg = (*flp)->fl_flags & FL_DELEG;
int error;
LIST_HEAD(dispose);
@@ -1588,6 +1628,10 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
lease = *flp;
trace_generic_add_lease(inode, lease);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
/*
* In the delegation case we need mutual exclusion with
* a number of operations that take the i_mutex. We trylock
@@ -1606,9 +1650,9 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
return -EINVAL;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
- error = check_conflicting_open(dentry, arg);
+ error = check_conflicting_open(dentry, arg, lease->fl_flags);
if (error)
goto out;
@@ -1621,13 +1665,13 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* except for this filp.
*/
error = -EAGAIN;
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
- if (fl->fl_file == filp) {
- my_before = before;
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file == filp &&
+ fl->fl_owner == lease->fl_owner) {
+ my_fl = fl;
continue;
}
+
/*
* No exclusive leases if someone else has a lease on
* this file:
@@ -1642,9 +1686,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
goto out;
}
- if (my_before != NULL) {
- lease = *my_before;
- error = lease->fl_lmops->lm_change(my_before, arg, &dispose);
+ if (my_fl != NULL) {
+ error = lease->fl_lmops->lm_change(my_fl, arg, &dispose);
if (error)
goto out;
goto out_setup;
@@ -1654,7 +1697,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
if (!leases_enable)
goto out;
- locks_insert_lock(before, lease);
+ locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease);
/*
* The check in break_lease() is lockless. It's possible for another
* open to race in after we did the earlier check for a conflicting
@@ -1665,46 +1708,51 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* precedes these checks.
*/
smp_mb();
- error = check_conflicting_open(dentry, arg);
- if (error)
- goto out_unlink;
+ error = check_conflicting_open(dentry, arg, lease->fl_flags);
+ if (error) {
+ locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt);
+ goto out;
+ }
out_setup:
if (lease->fl_lmops->lm_setup)
lease->fl_lmops->lm_setup(lease, priv);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
if (is_deleg)
mutex_unlock(&inode->i_mutex);
- if (!error && !my_before)
+ if (!error && !my_fl)
*flp = NULL;
return error;
-out_unlink:
- locks_unlink_lock(before);
- goto out;
}
-static int generic_delete_lease(struct file *filp)
+static int generic_delete_lease(struct file *filp, void *owner)
{
int error = -EAGAIN;
- struct file_lock *fl, **before;
+ struct file_lock *fl, *victim = NULL;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct file_lock_context *ctx = inode->i_flctx;
LIST_HEAD(dispose);
- spin_lock(&inode->i_lock);
- time_out_leases(inode, &dispose);
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
- if (fl->fl_file == filp)
+ if (!ctx) {
+ trace_generic_delete_lease(inode, NULL);
+ return error;
+ }
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file == filp &&
+ fl->fl_owner == owner) {
+ victim = fl;
break;
+ }
}
- trace_generic_delete_lease(inode, fl);
+ trace_generic_delete_lease(inode, victim);
- if (fl && IS_LEASE(fl))
- error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
- spin_unlock(&inode->i_lock);
+ if (victim)
+ error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
return error;
}
@@ -1737,13 +1785,14 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
switch (arg) {
case F_UNLCK:
- return generic_delete_lease(filp);
+ return generic_delete_lease(filp, *priv);
case F_RDLCK:
case F_WRLCK:
if (!(*flp)->fl_lmops->lm_break) {
WARN_ON_ONCE(1);
return -ENOLCK;
}
+
return generic_add_lease(filp, arg, flp, priv);
default:
return -EINVAL;
@@ -1816,7 +1865,7 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
if (arg == F_UNLCK)
- return vfs_setlease(filp, F_UNLCK, NULL, NULL);
+ return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
return do_fcntl_add_lease(fd, filp, arg);
}
@@ -2171,7 +2220,7 @@ again:
*/
/*
* we need that spin_lock here - it prevents reordering between
- * update of inode->i_flock and check for it done in close().
+ * update of i_flctx->flc_posix and check for it done in close().
* rcu_read_lock() wouldn't do.
*/
spin_lock(&current->files->file_lock);
@@ -2331,13 +2380,14 @@ out:
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
struct file_lock lock;
+ struct file_lock_context *ctx = file_inode(filp)->i_flctx;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
- if (!file_inode(filp)->i_flock)
+ if (!ctx || list_empty(&ctx->flc_posix))
return;
lock.fl_type = F_UNLCK;
@@ -2358,67 +2408,67 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
EXPORT_SYMBOL(locks_remove_posix);
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_flock(struct file *filp)
+{
+ struct file_lock fl = {
+ .fl_owner = filp,
+ .fl_pid = current->tgid,
+ .fl_file = filp,
+ .fl_flags = FL_FLOCK,
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+ struct file_lock_context *flctx = file_inode(filp)->i_flctx;
+
+ if (list_empty(&flctx->flc_flock))
+ return;
+
+ if (filp->f_op->flock)
+ filp->f_op->flock(filp, F_SETLKW, &fl);
+ else
+ flock_lock_file(filp, &fl);
+
+ if (fl.fl_ops && fl.fl_ops->fl_release_private)
+ fl.fl_ops->fl_release_private(&fl);
+}
+
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_lease(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl, *tmp;
+ LIST_HEAD(dispose);
+
+ if (list_empty(&ctx->flc_lease))
+ return;
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
+ lease_modify(fl, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
+ locks_dispose_list(&dispose);
+}
+
/*
* This function is called on the last close of an open file.
*/
void locks_remove_file(struct file *filp)
{
- struct inode * inode = file_inode(filp);
- struct file_lock *fl;
- struct file_lock **before;
- LIST_HEAD(dispose);
-
- if (!inode->i_flock)
+ if (!file_inode(filp)->i_flctx)
return;
+ /* remove any OFD locks */
locks_remove_posix(filp, filp);
- if (filp->f_op->flock) {
- struct file_lock fl = {
- .fl_owner = filp,
- .fl_pid = current->tgid,
- .fl_file = filp,
- .fl_flags = FL_FLOCK,
- .fl_type = F_UNLCK,
- .fl_end = OFFSET_MAX,
- };
- filp->f_op->flock(filp, F_SETLKW, &fl);
- if (fl.fl_ops && fl.fl_ops->fl_release_private)
- fl.fl_ops->fl_release_private(&fl);
- }
-
- spin_lock(&inode->i_lock);
- before = &inode->i_flock;
-
- while ((fl = *before) != NULL) {
- if (fl->fl_file == filp) {
- if (IS_LEASE(fl)) {
- lease_modify(before, F_UNLCK, &dispose);
- continue;
- }
-
- /*
- * There's a leftover lock on the list of a type that
- * we didn't expect to see. Most likely a classic
- * POSIX lock that ended up not getting released
- * properly, or that raced onto the list somehow. Log
- * some info about it and then just remove it from
- * the list.
- */
- WARN(!IS_FLOCK(fl),
- "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
- MAJOR(inode->i_sb->s_dev),
- MINOR(inode->i_sb->s_dev), inode->i_ino,
- fl->fl_type, fl->fl_flags,
- fl->fl_start, fl->fl_end);
+ /* remove flock locks */
+ locks_remove_flock(filp);
- locks_delete_lock(before, &dispose);
- continue;
- }
- before = &fl->fl_next;
- }
- spin_unlock(&inode->i_lock);
- locks_dispose_list(&dispose);
+ /* remove any leases */
+ locks_remove_lease(filp);
}
/**
@@ -2621,6 +2671,9 @@ static int __init filelock_init(void)
{
int i;
+ flctx_cache = kmem_cache_create("file_lock_ctx",
+ sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
+
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
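
Both caches follow the usual slab pattern, and SLAB_PANIC means a failed creation panics at boot instead of returning NULL, which is why no error check follows. The general shape, as a kernel-API sketch rather than a standalone program (the example_* names are placeholders):

struct example_obj { int payload; };

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	/* SLAB_PANIC: creation failure panics at boot, so no NULL check */
	example_cache = kmem_cache_create("example_cache",
			sizeof(struct example_obj), 0, SLAB_PANIC, NULL);
	return 0;
}

static void example_alloc_free(void)
{
	struct example_obj *o = kmem_cache_alloc(example_cache, GFP_KERNEL);

	if (o)
		kmem_cache_free(example_cache, o);
}
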
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 3dece03..c7abc10 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -128,6 +128,11 @@ config PNFS_OBJLAYOUT
depends on NFS_V4_1 && SCSI_OSD_ULD
default NFS_V4
+config PNFS_FLEXFILE_LAYOUT
+ tristate
+ depends on NFS_V4_1 && NFS_V3
+ default m
+
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
string "NFSv4.1 Implementation ID Domain"
depends on NFS_V4_1
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 04cb830..1e987ac 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -27,9 +27,10 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o
dns_resolve.o nfs4trace.o
nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o
nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
-nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
+nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o pnfs_nfs.o
nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o
obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/
obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayout/
obj-$(CONFIG_PNFS_BLOCK) += blocklayout/
+obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += flexfilelayout/
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 77fec6a..1cac3c1 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -860,12 +860,14 @@ static const struct nfs_pageio_ops bl_pg_read_ops = {
.pg_init = bl_pg_init_read,
.pg_test = bl_pg_test_read,
.pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops bl_pg_write_ops = {
.pg_init = bl_pg_init_write,
.pg_test = bl_pg_test_write,
.pg_doio = pnfs_generic_pg_writepages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static struct pnfs_layoutdriver_type blocklayout_type = {
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index b8fb3a4..351be920 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp)
if (try_to_freeze())
continue;
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
if (!list_empty(&serv->sv_cb_list)) {
req = list_first_entry(&serv->sv_cb_list,
struct rpc_rqst, rq_bc_list);
list_del(&req->rq_bc_list);
spin_unlock_bh(&serv->sv_cb_lock);
+ finish_wait(&serv->sv_cb_waitq, &wq);
dprintk("Invoking bc_svc_process()\n");
error = bc_svc_process(serv, req, rqstp);
dprintk("bc_svc_process() returned w/ error code= %d\n",
error);
} else {
spin_unlock_bh(&serv->sv_cb_lock);
- schedule();
+ /* schedule_timeout to game the hung task watchdog */
+ schedule_timeout(60 * HZ);
+ finish_wait(&serv->sv_cb_waitq, &wq);
}
- finish_wait(&serv->sv_cb_waitq, &wq);
}
return 0;
}
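
The rewritten loop keeps the callback thread in TASK_UNINTERRUPTIBLE but sleeps in 60-second slices, so the hung-task detector (which by default flags tasks blocked for more than two minutes) never fires, and finish_wait() now runs before the request is processed. Schematically, as a kernel-API sketch with placeholder helpers (waitq, work_available(), process_work()):

static int callback_thread(void *unused)
{
	for (;;) {
		DEFINE_WAIT(wq);

		prepare_to_wait(&waitq, &wq, TASK_UNINTERRUPTIBLE);
		if (work_available()) {
			finish_wait(&waitq, &wq);
			process_work();		/* runs outside the wait */
		} else {
			/* bounded sleep keeps the watchdog quiet */
			schedule_timeout(60 * HZ);
			finish_wait(&waitq, &wq);
		}
	}
	return 0;
}
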
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 7f3f606..da54332 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -85,25 +85,30 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
{
struct inode *inode = state->inode;
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct list_head *list;
int status = 0;
- if (inode->i_flock == NULL)
+ if (flctx == NULL)
goto out;
- /* Protect inode->i_flock using the i_lock */
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
- continue;
+ list = &flctx->flc_posix;
+ spin_lock(&flctx->flc_lock);
+restart:
+ list_for_each_entry(fl, list, fl_list) {
if (nfs_file_open_context(fl->fl_file) != ctx)
continue;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
status = nfs4_lock_delegation_recall(fl, state, stateid);
if (status < 0)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
+ if (list == &flctx->flc_posix) {
+ list = &flctx->flc_flock;
+ goto restart;
+ }
+ spin_unlock(&flctx->flc_lock);
out:
return status;
}
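
nfs_delegation_claim_locks() now walks two lists (flc_posix, then flc_flock) with a single loop body by swapping the list pointer and jumping back, since both lists chain file_lock entries on fl_list. The control shape in isolation (illustrative types):

#include <stdio.h>

struct item { int v; struct item *next; };

static void visit_both(struct item *posix, struct item *flock)
{
	struct item *list = posix;		/* first pass: POSIX locks */
again:
	for (struct item *i = list; i; i = i->next)
		printf("%d\n", i->v);
	if (list == posix) {			/* second pass: flock locks */
		list = flock;
		goto again;
	}
}
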
@@ -301,6 +306,17 @@ nfs_inode_detach_delegation(struct inode *inode)
return nfs_detach_delegation(nfsi, delegation, server);
}
+static void
+nfs_update_inplace_delegation(struct nfs_delegation *delegation,
+ const struct nfs_delegation *update)
+{
+ if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
+ delegation->stateid.seqid = update->stateid.seqid;
+ smp_wmb();
+ delegation->type = update->type;
+ }
+}
+
/**
* nfs_inode_set_delegation - set up a delegation on an inode
* @inode: inode to which delegation applies
@@ -334,9 +350,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
old_delegation = rcu_dereference_protected(nfsi->delegation,
lockdep_is_held(&clp->cl_lock));
if (old_delegation != NULL) {
- if (nfs4_stateid_match(&delegation->stateid,
- &old_delegation->stateid) &&
- delegation->type == old_delegation->type) {
+ /* Is this an update of the existing delegation? */
+ if (nfs4_stateid_match_other(&old_delegation->stateid,
+ &delegation->stateid)) {
+ nfs_update_inplace_delegation(old_delegation,
+ delegation);
+ nfsi->delegation_state = old_delegation->type;
goto out;
}
/*
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 10bf072..7077521 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -66,6 +66,10 @@ static struct kmem_cache *nfs_direct_cachep;
/*
* This represents a set of asynchronous requests that we're waiting on
*/
+struct nfs_direct_mirror {
+ ssize_t count;
+};
+
struct nfs_direct_req {
struct kref kref; /* release manager */
@@ -78,8 +82,13 @@ struct nfs_direct_req {
/* completion state */
atomic_t io_count; /* i/os we're waiting for */
spinlock_t lock; /* protect completion state */
+
+ struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
+ int mirror_count;
+
ssize_t count, /* bytes actually processed */
bytes_left, /* bytes left to be sent */
+ io_start, /* start of IO */
error; /* any reported error */
struct completion completion; /* wait for i/o completion */
@@ -108,26 +117,56 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
return atomic_dec_and_test(&dreq->io_count);
}
+void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq)
+{
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+}
+EXPORT_SYMBOL_GPL(nfs_direct_set_resched_writes);
+
+static void
+nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+{
+ int i;
+ ssize_t count;
+
+ WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
+
+ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
+ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
+ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
+ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
+ }
+
+ /* update the dreq->count by finding the minimum agreed count from all
+ * mirrors */
+ count = dreq->mirrors[0].count;
+
+ for (i = 1; i < dreq->mirror_count; i++)
+ count = min(count, dreq->mirrors[i].count);
+
+ dreq->count = count;
+}
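
nfs_direct_good_bytes() tracks, per mirror, how many bytes have completed contiguously from io_start, and dreq->count is the minimum over all mirrors, since data only counts once every mirror has it. The reduction itself, as a standalone illustration:

#include <stddef.h>

/* smallest per-mirror completed byte count = bytes safe on all mirrors */
static long min_agreed_count(const long *mirror_count, size_t n)
{
	long min = mirror_count[0];

	for (size_t i = 1; i < n; i++)
		if (mirror_count[i] < min)
			min = mirror_count[i];
	return min;
}
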
+
/*
* nfs_direct_select_verf - select the right verifier
* @dreq - direct request possibly spanning multiple servers
* @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
- * @ds_idx - index of data server in data server list, only valid if ds_clp set
+ * @commit_idx - commit bucket index for the DS
*
* returns the correct verifier to use given the role of the server
*/
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
struct nfs_client *ds_clp,
- int ds_idx)
+ int commit_idx)
{
struct nfs_writeverf *verfp = &dreq->verf;
#ifdef CONFIG_NFS_V4_1
if (ds_clp) {
/* pNFS is in use, use the DS verf */
- if (ds_idx >= 0 && ds_idx < dreq->ds_cinfo.nbuckets)
- verfp = &dreq->ds_cinfo.buckets[ds_idx].direct_verf;
+ if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
+ verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
else
WARN_ON_ONCE(1);
}
@@ -148,8 +187,7 @@ static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
{
struct nfs_writeverf *verfp;
- verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
- hdr->ds_idx);
+ verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
WARN_ON_ONCE(verfp->committed >= 0);
memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
WARN_ON_ONCE(verfp->committed < 0);
@@ -169,8 +207,7 @@ static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
{
struct nfs_writeverf *verfp;
- verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
- hdr->ds_idx);
+ verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
if (verfp->committed < 0) {
nfs_direct_set_hdr_verf(dreq, hdr);
return 0;
@@ -193,7 +230,11 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
verfp = nfs_direct_select_verf(dreq, data->ds_clp,
data->ds_commit_index);
- WARN_ON_ONCE(verfp->committed < 0);
+
+ /* verifier not set so always fail */
+ if (verfp->committed < 0)
+ return 1;
+
return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf));
}
@@ -212,6 +253,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
*/
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+ /* we only support swap files calling nfs_direct_IO */
+ if (!IS_SWAPFILE(inode))
+ return 0;
+
#ifndef CONFIG_NFS_SWAP
dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
iocb->ki_filp, (long long) pos, iter->nr_segs);
@@ -243,6 +290,18 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}
+static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
+ struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ int mirror_count = 1;
+
+ if (pgio->pg_ops->pg_get_mirror_count)
+ mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+
+ dreq->mirror_count = mirror_count;
+}
+
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
struct nfs_direct_req *dreq;
@@ -257,6 +316,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
INIT_LIST_HEAD(&dreq->mds_cinfo.list);
dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */
INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
+ dreq->mirror_count = 1;
spin_lock_init(&dreq->lock);
return dreq;
@@ -363,7 +423,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
dreq->error = hdr->error;
else
- dreq->count += hdr->good_bytes;
+ nfs_direct_good_bytes(dreq, hdr);
+
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
@@ -541,6 +602,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
dreq->inode = inode;
dreq->bytes_left = count;
+ dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
@@ -573,6 +635,20 @@ out:
return result;
}
+static void
+nfs_direct_write_scan_commit_list(struct inode *inode,
+ struct list_head *list,
+ struct nfs_commit_info *cinfo)
+{
+ spin_lock(cinfo->lock);
+#ifdef CONFIG_NFS_V4_1
+ if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
+ NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
+#endif
+ nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
+ spin_unlock(cinfo->lock);
+}
+
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor desc;
@@ -580,20 +656,23 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
LIST_HEAD(reqs);
struct nfs_commit_info cinfo;
LIST_HEAD(failed);
+ int i;
nfs_init_cinfo_from_dreq(&cinfo, dreq);
- pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
- spin_lock(cinfo.lock);
- nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
- spin_unlock(cinfo.lock);
+ nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
dreq->count = 0;
+ for (i = 0; i < dreq->mirror_count; i++)
+ dreq->mirrors[i].count = 0;
get_dreq(dreq);
nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
+ req = nfs_list_entry(reqs.next);
+ nfs_direct_setup_mirroring(dreq, &desc, req);
+
list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
if (!nfs_pageio_add_request(&desc, req)) {
nfs_list_remove_request(req);
@@ -640,7 +719,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
nfs_list_remove_request(req);
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
/* Note the rewrite will go through mds */
- nfs_mark_request_commit(req, NULL, &cinfo);
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
} else
nfs_release_request(req);
nfs_unlock_and_release_request(req);
@@ -715,7 +794,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
dreq->error = hdr->error;
}
if (dreq->error == 0) {
- dreq->count += hdr->good_bytes;
+ nfs_direct_good_bytes(dreq, hdr);
if (nfs_write_need_commit(hdr)) {
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
request_commit = true;
@@ -739,7 +818,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (request_commit) {
kref_get(&req->wb_kref);
- nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo,
+ hdr->ds_commit_idx);
}
nfs_unlock_and_release_request(req);
}
@@ -820,6 +900,9 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
result = PTR_ERR(req);
break;
}
+
+ nfs_direct_setup_mirroring(dreq, &desc, req);
+
nfs_lock_request(req);
req->wb_index = pos >> PAGE_SHIFT;
req->wb_offset = pos & ~PAGE_MASK;
@@ -928,6 +1011,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
dreq->inode = inode;
dreq->bytes_left = count;
+ dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2ab6f00..94712fc 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -646,7 +646,6 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = nfs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int nfs_need_sync_write(struct file *filp, struct inode *inode)
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 51aa889..7ae1c26 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -118,13 +118,6 @@ static void filelayout_reset_read(struct nfs_pgio_header *hdr)
}
}
-static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
-{
- if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
- return;
- pnfs_return_layout(inode);
-}
-
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
@@ -207,7 +200,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_mark_deviceid_unavailable(devid);
- set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
+ pnfs_error_mark_layout_for_return(inode, lseg);
rpc_wake_up(&tbl->slot_tbl_waitq);
/* fall through */
default:
@@ -339,16 +332,6 @@ static void filelayout_read_count_stats(struct rpc_task *task, void *data)
rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}
-static void filelayout_read_release(void *data)
-{
- struct nfs_pgio_header *hdr = data;
- struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout;
-
- filelayout_fenceme(lo->plh_inode, lo);
- nfs_put_client(hdr->ds_clp);
- hdr->mds_ops->rpc_release(data);
-}
-
static int filelayout_write_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
@@ -371,17 +354,6 @@ static int filelayout_write_done_cb(struct rpc_task *task,
return 0;
}
-/* Fake up some data that will cause nfs_commit_release to retry the writes. */
-static void prepare_to_resend_writes(struct nfs_commit_data *data)
-{
- struct nfs_page *first = nfs_list_entry(data->pages.next);
-
- data->task.tk_status = 0;
- memcpy(&data->verf.verifier, &first->wb_verf,
- sizeof(data->verf.verifier));
- data->verf.verifier.data[0]++; /* ensure verifier mismatch */
-}
-
static int filelayout_commit_done_cb(struct rpc_task *task,
struct nfs_commit_data *data)
{
@@ -393,7 +365,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
switch (err) {
case -NFS4ERR_RESET_TO_MDS:
- prepare_to_resend_writes(data);
+ pnfs_generic_prepare_to_resend_writes(data);
return -EAGAIN;
case -EAGAIN:
rpc_restart_call_prepare(task);
@@ -451,16 +423,6 @@ static void filelayout_write_count_stats(struct rpc_task *task, void *data)
rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}
-static void filelayout_write_release(void *data)
-{
- struct nfs_pgio_header *hdr = data;
- struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout;
-
- filelayout_fenceme(lo->plh_inode, lo);
- nfs_put_client(hdr->ds_clp);
- hdr->mds_ops->rpc_release(data);
-}
-
static void filelayout_commit_prepare(struct rpc_task *task, void *data)
{
struct nfs_commit_data *wdata = data;
@@ -471,14 +433,6 @@ static void filelayout_commit_prepare(struct rpc_task *task, void *data)
task);
}
-static void filelayout_write_commit_done(struct rpc_task *task, void *data)
-{
- struct nfs_commit_data *wdata = data;
-
- /* Note this may cause RPC to be resent */
- wdata->mds_ops->rpc_call_done(task, data);
-}
-
static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
{
struct nfs_commit_data *cdata = data;
@@ -486,35 +440,25 @@ static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
}
-static void filelayout_commit_release(void *calldata)
-{
- struct nfs_commit_data *data = calldata;
-
- data->completion_ops->completion(data);
- pnfs_put_lseg(data->lseg);
- nfs_put_client(data->ds_clp);
- nfs_commitdata_release(data);
-}
-
static const struct rpc_call_ops filelayout_read_call_ops = {
.rpc_call_prepare = filelayout_read_prepare,
.rpc_call_done = filelayout_read_call_done,
.rpc_count_stats = filelayout_read_count_stats,
- .rpc_release = filelayout_read_release,
+ .rpc_release = pnfs_generic_rw_release,
};
static const struct rpc_call_ops filelayout_write_call_ops = {
.rpc_call_prepare = filelayout_write_prepare,
.rpc_call_done = filelayout_write_call_done,
.rpc_count_stats = filelayout_write_count_stats,
- .rpc_release = filelayout_write_release,
+ .rpc_release = pnfs_generic_rw_release,
};
static const struct rpc_call_ops filelayout_commit_call_ops = {
.rpc_call_prepare = filelayout_commit_prepare,
- .rpc_call_done = filelayout_write_commit_done,
+ .rpc_call_done = pnfs_generic_write_commit_done,
.rpc_count_stats = filelayout_commit_count_stats,
- .rpc_release = filelayout_commit_release,
+ .rpc_release = pnfs_generic_commit_release,
};
static enum pnfs_try_status
@@ -548,7 +492,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
/* No multipath support. Use first DS */
atomic_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- hdr->ds_idx = idx;
+ hdr->ds_commit_idx = idx;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
hdr->args.fh = fh;
@@ -557,8 +501,9 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->mds_offset = offset;
/* Perform an asynchronous read to ds */
- nfs_initiate_pgio(ds_clnt, hdr,
- &filelayout_read_call_ops, 0, RPC_TASK_SOFTCONN);
+ nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
+ NFS_PROTO(hdr->inode), &filelayout_read_call_ops,
+ 0, RPC_TASK_SOFTCONN);
return PNFS_ATTEMPTED;
}
@@ -591,16 +536,16 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
hdr->pgio_done_cb = filelayout_write_done_cb;
atomic_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- hdr->ds_idx = idx;
+ hdr->ds_commit_idx = idx;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
hdr->args.fh = fh;
hdr->args.offset = filelayout_get_dserver_offset(lseg, offset);
/* Perform an asynchronous write */
- nfs_initiate_pgio(ds_clnt, hdr,
- &filelayout_write_call_ops, sync,
- RPC_TASK_SOFTCONN);
+ nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
+ NFS_PROTO(hdr->inode), &filelayout_write_call_ops,
+ sync, RPC_TASK_SOFTCONN);
return PNFS_ATTEMPTED;
}
@@ -988,12 +933,14 @@ static const struct nfs_pageio_ops filelayout_pg_read_ops = {
.pg_init = filelayout_pg_init_read,
.pg_test = filelayout_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops filelayout_pg_write_ops = {
.pg_init = filelayout_pg_init_write,
.pg_test = filelayout_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
@@ -1004,37 +951,11 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
return j;
}
-/* The generic layer is about to remove the req from the commit list.
- * If this will make the bucket empty, it will need to put the lseg reference.
- * Note this must be called holding the inode (/cinfo) lock
- */
-static void
-filelayout_clear_request_commit(struct nfs_page *req,
- struct nfs_commit_info *cinfo)
-{
- struct pnfs_layout_segment *freeme = NULL;
-
- if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
- goto out;
- cinfo->ds->nwritten--;
- if (list_is_singular(&req->wb_list)) {
- struct pnfs_commit_bucket *bucket;
-
- bucket = list_first_entry(&req->wb_list,
- struct pnfs_commit_bucket,
- written);
- freeme = bucket->wlseg;
- bucket->wlseg = NULL;
- }
-out:
- nfs_request_remove_commit_list(req, cinfo);
- pnfs_put_lseg_locked(freeme);
-}
-
static void
filelayout_mark_request_commit(struct nfs_page *req,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx)
{
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
@@ -1064,7 +985,7 @@ filelayout_mark_request_commit(struct nfs_page *req,
* is normally transferred to the COMMIT call and released
* there. It could also be released if the last req is pulled
* off due to a rewrite, in which case it will be done in
- * filelayout_clear_request_commit
+ * pnfs_generic_clear_request_commit
*/
buckets[i].wlseg = pnfs_get_lseg(lseg);
}
@@ -1138,101 +1059,15 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
if (fh)
data->args.fh = fh;
- return nfs_initiate_commit(ds_clnt, data,
+ return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode),
&filelayout_commit_call_ops, how,
RPC_TASK_SOFTCONN);
out_err:
- prepare_to_resend_writes(data);
- filelayout_commit_release(data);
+ pnfs_generic_prepare_to_resend_writes(data);
+ pnfs_generic_commit_release(data);
return -EAGAIN;
}
-static int
-transfer_commit_list(struct list_head *src, struct list_head *dst,
- struct nfs_commit_info *cinfo, int max)
-{
- struct nfs_page *req, *tmp;
- int ret = 0;
-
- list_for_each_entry_safe(req, tmp, src, wb_list) {
- if (!nfs_lock_request(req))
- continue;
- kref_get(&req->wb_kref);
- if (cond_resched_lock(cinfo->lock))
- list_safe_reset_next(req, tmp, wb_list);
- nfs_request_remove_commit_list(req, cinfo);
- clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
- nfs_list_add_request(req, dst);
- ret++;
- if ((ret == max) && !cinfo->dreq)
- break;
- }
- return ret;
-}
-
-/* Note called with cinfo->lock held. */
-static int
-filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
- struct nfs_commit_info *cinfo,
- int max)
-{
- struct list_head *src = &bucket->written;
- struct list_head *dst = &bucket->committing;
- int ret;
-
- ret = transfer_commit_list(src, dst, cinfo, max);
- if (ret) {
- cinfo->ds->nwritten -= ret;
- cinfo->ds->ncommitting += ret;
- bucket->clseg = bucket->wlseg;
- if (list_empty(src))
- bucket->wlseg = NULL;
- else
- pnfs_get_lseg(bucket->clseg);
- }
- return ret;
-}
-
-/* Move reqs from written to committing lists, returning count of number moved.
- * Note called with cinfo->lock held.
- */
-static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
- int max)
-{
- int i, rv = 0, cnt;
-
- for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
- cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
- cinfo, max);
- max -= cnt;
- rv += cnt;
- }
- return rv;
-}
-
-/* Pull everything off the committing lists and dump into @dst */
-static void filelayout_recover_commit_reqs(struct list_head *dst,
- struct nfs_commit_info *cinfo)
-{
- struct pnfs_commit_bucket *b;
- struct pnfs_layout_segment *freeme;
- int i;
-
-restart:
- spin_lock(cinfo->lock);
- for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
- if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
- freeme = b->wlseg;
- b->wlseg = NULL;
- spin_unlock(cinfo->lock);
- pnfs_put_lseg(freeme);
- goto restart;
- }
- }
- cinfo->ds->nwritten = 0;
- spin_unlock(cinfo->lock);
-}
-
/* filelayout_search_commit_reqs - Search lists in @cinfo for the head request
* for @page
* @cinfo - commit info for current inode
@@ -1263,108 +1098,14 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
return NULL;
}
-static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx)
-{
- struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
- struct pnfs_commit_bucket *bucket;
- struct pnfs_layout_segment *freeme;
- int i;
-
- for (i = idx; i < fl_cinfo->nbuckets; i++) {
- bucket = &fl_cinfo->buckets[i];
- if (list_empty(&bucket->committing))
- continue;
- nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
- spin_lock(cinfo->lock);
- freeme = bucket->clseg;
- bucket->clseg = NULL;
- spin_unlock(cinfo->lock);
- pnfs_put_lseg(freeme);
- }
-}
-
-static unsigned int
-alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
-{
- struct pnfs_ds_commit_info *fl_cinfo;
- struct pnfs_commit_bucket *bucket;
- struct nfs_commit_data *data;
- int i;
- unsigned int nreq = 0;
-
- fl_cinfo = cinfo->ds;
- bucket = fl_cinfo->buckets;
- for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
- if (list_empty(&bucket->committing))
- continue;
- data = nfs_commitdata_alloc();
- if (!data)
- break;
- data->ds_commit_index = i;
- spin_lock(cinfo->lock);
- data->lseg = bucket->clseg;
- bucket->clseg = NULL;
- spin_unlock(cinfo->lock);
- list_add(&data->pages, list);
- nreq++;
- }
-
- /* Clean up on error */
- filelayout_retry_commit(cinfo, i);
- /* Caller will clean up entries put on list */
- return nreq;
-}
-
-/* This follows nfs_commit_list pretty closely */
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
int how, struct nfs_commit_info *cinfo)
{
- struct nfs_commit_data *data, *tmp;
- LIST_HEAD(list);
- unsigned int nreq = 0;
-
- if (!list_empty(mds_pages)) {
- data = nfs_commitdata_alloc();
- if (data != NULL) {
- data->lseg = NULL;
- list_add(&data->pages, &list);
- nreq++;
- } else {
- nfs_retry_commit(mds_pages, NULL, cinfo);
- filelayout_retry_commit(cinfo, 0);
- cinfo->completion_ops->error_cleanup(NFS_I(inode));
- return -ENOMEM;
- }
- }
-
- nreq += alloc_ds_commits(cinfo, &list);
-
- if (nreq == 0) {
- cinfo->completion_ops->error_cleanup(NFS_I(inode));
- goto out;
- }
-
- atomic_add(nreq, &cinfo->mds->rpcs_out);
-
- list_for_each_entry_safe(data, tmp, &list, pages) {
- list_del_init(&data->pages);
- if (!data->lseg) {
- nfs_init_commit(data, mds_pages, NULL, cinfo);
- nfs_initiate_commit(NFS_CLIENT(inode), data,
- data->mds_ops, how, 0);
- } else {
- struct pnfs_commit_bucket *buckets;
-
- buckets = cinfo->ds->buckets;
- nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
- filelayout_initiate_commit(data, how);
- }
- }
-out:
- cinfo->ds->ncommitting = 0;
- return PNFS_ATTEMPTED;
+ return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
+ filelayout_initiate_commit);
}
+
static struct nfs4_deviceid_node *
filelayout_alloc_deviceid_node(struct nfs_server *server,
struct pnfs_device *pdev, gfp_t gfp_flags)
@@ -1421,9 +1162,9 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.pg_write_ops = &filelayout_pg_write_ops,
.get_ds_info = &filelayout_get_ds_info,
.mark_request_commit = filelayout_mark_request_commit,
- .clear_request_commit = filelayout_clear_request_commit,
- .scan_commit_lists = filelayout_scan_commit_lists,
- .recover_commit_reqs = filelayout_recover_commit_reqs,
+ .clear_request_commit = pnfs_generic_clear_request_commit,
+ .scan_commit_lists = pnfs_generic_scan_commit_lists,
+ .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
.search_commit_reqs = filelayout_search_commit_reqs,
.commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 7c9f800..2896cb8 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -33,13 +33,6 @@
#include "../pnfs.h"
/*
- * Default data server connection timeout and retrans values.
- * Set by module parameters dataserver_timeo and dataserver_retrans.
- */
-#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
-#define NFS4_DEF_DS_RETRANS 5
-
-/*
* Field testing shows we need to support up to 4096 stripe indices.
* We store each index as a u8 (u32 on the wire) to keep the memory footprint
* reasonable. This in turn means we support a maximum of 256
@@ -48,32 +41,11 @@
#define NFS4_PNFS_MAX_STRIPE_CNT 4096
#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
-/* error codes for internal use */
-#define NFS4ERR_RESET_TO_MDS 12001
-
enum stripetype4 {
STRIPE_SPARSE = 1,
STRIPE_DENSE = 2
};
-/* Individual ip address */
-struct nfs4_pnfs_ds_addr {
- struct sockaddr_storage da_addr;
- size_t da_addrlen;
- struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */
- char *da_remotestr; /* human readable addr+port */
-};
-
-struct nfs4_pnfs_ds {
- struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
- char *ds_remotestr; /* comma sep list of addrs */
- struct list_head ds_addrs;
- struct nfs_client *ds_clp;
- atomic_t ds_count;
- unsigned long ds_state;
-#define NFS4DS_CONNECTING 0 /* ds is establishing connection */
-};
-
struct nfs4_file_layout_dsaddr {
struct nfs4_deviceid_node id_node;
u32 stripe_count;
@@ -119,17 +91,6 @@ FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
}
-static inline void
-filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
-{
- u32 *p = (u32 *)&node->deviceid;
-
- printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n",
- p[0], p[1], p[2], p[3]);
-
- set_bit(NFS_DEVICEID_INVALID, &node->flags);
-}
-
static inline bool
filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
{
@@ -142,7 +103,6 @@ filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node);
extern struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
-extern void print_ds(struct nfs4_pnfs_ds *ds);
u32 nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset);
u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j);
struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg,
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index bfecac7..4f372e2 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -31,7 +31,6 @@
#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
-#include <linux/sunrpc/addr.h>
#include "../internal.h"
#include "../nfs4session.h"
@@ -42,183 +41,6 @@
static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
-/*
- * Data server cache
- *
- * Data servers can be mapped to different device ids.
- * nfs4_pnfs_ds reference counting
- * - set to 1 on allocation
- * - incremented when a device id maps a data server already in the cache.
- * - decremented when deviceid is removed from the cache.
- */
-static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
-static LIST_HEAD(nfs4_data_server_cache);
-
-/* Debug routines */
-void
-print_ds(struct nfs4_pnfs_ds *ds)
-{
- if (ds == NULL) {
- printk("%s NULL device\n", __func__);
- return;
- }
- printk(" ds %s\n"
- " ref count %d\n"
- " client %p\n"
- " cl_exchange_flags %x\n",
- ds->ds_remotestr,
- atomic_read(&ds->ds_count), ds->ds_clp,
- ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
-}
-
-static bool
-same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
-{
- struct sockaddr_in *a, *b;
- struct sockaddr_in6 *a6, *b6;
-
- if (addr1->sa_family != addr2->sa_family)
- return false;
-
- switch (addr1->sa_family) {
- case AF_INET:
- a = (struct sockaddr_in *)addr1;
- b = (struct sockaddr_in *)addr2;
-
- if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
- a->sin_port == b->sin_port)
- return true;
- break;
-
- case AF_INET6:
- a6 = (struct sockaddr_in6 *)addr1;
- b6 = (struct sockaddr_in6 *)addr2;
-
- /* LINKLOCAL addresses must have matching scope_id */
- if (ipv6_addr_src_scope(&a6->sin6_addr) ==
- IPV6_ADDR_SCOPE_LINKLOCAL &&
- a6->sin6_scope_id != b6->sin6_scope_id)
- return false;
-
- if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
- a6->sin6_port == b6->sin6_port)
- return true;
- break;
-
- default:
- dprintk("%s: unhandled address family: %u\n",
- __func__, addr1->sa_family);
- return false;
- }
-
- return false;
-}
-
-static bool
-_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
- const struct list_head *dsaddrs2)
-{
- struct nfs4_pnfs_ds_addr *da1, *da2;
-
- /* step through both lists, comparing as we go */
- for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
- da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
- da1 != NULL && da2 != NULL;
- da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
- da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
- if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
- (struct sockaddr *)&da2->da_addr))
- return false;
- }
- if (da1 == NULL && da2 == NULL)
- return true;
-
- return false;
-}
-
-/*
- * Lookup DS by addresses. nfs4_ds_cache_lock is held
- */
-static struct nfs4_pnfs_ds *
-_data_server_lookup_locked(const struct list_head *dsaddrs)
-{
- struct nfs4_pnfs_ds *ds;
-
- list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
- if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
- return ds;
- return NULL;
-}
-
-/*
- * Create an rpc connection to the nfs4_pnfs_ds data server
- * Currently only supports IPv4 and IPv6 addresses
- */
-static int
-nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
-{
- struct nfs_client *clp = ERR_PTR(-EIO);
- struct nfs4_pnfs_ds_addr *da;
- int status = 0;
-
- dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
- mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);
-
- list_for_each_entry(da, &ds->ds_addrs, da_node) {
- dprintk("%s: DS %s: trying address %s\n",
- __func__, ds->ds_remotestr, da->da_remotestr);
-
- clp = nfs4_set_ds_client(mds_srv->nfs_client,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, IPPROTO_TCP,
- dataserver_timeo, dataserver_retrans);
- if (!IS_ERR(clp))
- break;
- }
-
- if (IS_ERR(clp)) {
- status = PTR_ERR(clp);
- goto out;
- }
-
- status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
- if (status)
- goto out_put;
-
- smp_wmb();
- ds->ds_clp = clp;
- dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
-out:
- return status;
-out_put:
- nfs_put_client(clp);
- goto out;
-}
-
-static void
-destroy_ds(struct nfs4_pnfs_ds *ds)
-{
- struct nfs4_pnfs_ds_addr *da;
-
- dprintk("--> %s\n", __func__);
- ifdebug(FACILITY)
- print_ds(ds);
-
- nfs_put_client(ds->ds_clp);
-
- while (!list_empty(&ds->ds_addrs)) {
- da = list_first_entry(&ds->ds_addrs,
- struct nfs4_pnfs_ds_addr,
- da_node);
- list_del_init(&da->da_node);
- kfree(da->da_remotestr);
- kfree(da);
- }
-
- kfree(ds->ds_remotestr);
- kfree(ds);
-}
-
void
nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
@@ -229,259 +51,13 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
for (i = 0; i < dsaddr->ds_num; i++) {
ds = dsaddr->ds_list[i];
- if (ds != NULL) {
- if (atomic_dec_and_lock(&ds->ds_count,
- &nfs4_ds_cache_lock)) {
- list_del_init(&ds->ds_node);
- spin_unlock(&nfs4_ds_cache_lock);
- destroy_ds(ds);
- }
- }
+ if (ds != NULL)
+ nfs4_pnfs_ds_put(ds);
}
kfree(dsaddr->stripe_indices);
kfree(dsaddr);
}
-/*
- * Create a string with a human readable address and port to avoid
- * complicated setup around many dprinks.
- */
-static char *
-nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
-{
- struct nfs4_pnfs_ds_addr *da;
- char *remotestr;
- size_t len;
- char *p;
-
- len = 3; /* '{', '}' and eol */
- list_for_each_entry(da, dsaddrs, da_node) {
- len += strlen(da->da_remotestr) + 1; /* string plus comma */
- }
-
- remotestr = kzalloc(len, gfp_flags);
- if (!remotestr)
- return NULL;
-
- p = remotestr;
- *(p++) = '{';
- len--;
- list_for_each_entry(da, dsaddrs, da_node) {
- size_t ll = strlen(da->da_remotestr);
-
- if (ll > len)
- goto out_err;
-
- memcpy(p, da->da_remotestr, ll);
- p += ll;
- len -= ll;
-
- if (len < 1)
- goto out_err;
- (*p++) = ',';
- len--;
- }
- if (len < 2)
- goto out_err;
- *(p++) = '}';
- *p = '\0';
- return remotestr;
-out_err:
- kfree(remotestr);
- return NULL;
-}
-
-static struct nfs4_pnfs_ds *
-nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
-{
- struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
- char *remotestr;
-
- if (list_empty(dsaddrs)) {
- dprintk("%s: no addresses defined\n", __func__);
- goto out;
- }
-
- ds = kzalloc(sizeof(*ds), gfp_flags);
- if (!ds)
- goto out;
-
-	/* this is only used for debugging, so it's ok if it's NULL */
- remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
-
- spin_lock(&nfs4_ds_cache_lock);
- tmp_ds = _data_server_lookup_locked(dsaddrs);
- if (tmp_ds == NULL) {
- INIT_LIST_HEAD(&ds->ds_addrs);
- list_splice_init(dsaddrs, &ds->ds_addrs);
- ds->ds_remotestr = remotestr;
- atomic_set(&ds->ds_count, 1);
- INIT_LIST_HEAD(&ds->ds_node);
- ds->ds_clp = NULL;
- list_add(&ds->ds_node, &nfs4_data_server_cache);
- dprintk("%s add new data server %s\n", __func__,
- ds->ds_remotestr);
- } else {
- kfree(remotestr);
- kfree(ds);
- atomic_inc(&tmp_ds->ds_count);
- dprintk("%s data server %s found, inc'ed ds_count to %d\n",
- __func__, tmp_ds->ds_remotestr,
- atomic_read(&tmp_ds->ds_count));
- ds = tmp_ds;
- }
- spin_unlock(&nfs4_ds_cache_lock);
-out:
- return ds;
-}
-
-/*
- * Currently only supports ipv4, ipv6 and one multi-path address.
- */
-static struct nfs4_pnfs_ds_addr *
-decode_ds_addr(struct net *net, struct xdr_stream *streamp, gfp_t gfp_flags)
-{
- struct nfs4_pnfs_ds_addr *da = NULL;
- char *buf, *portstr;
- __be16 port;
- int nlen, rlen;
- int tmp[2];
- __be32 *p;
- char *netid, *match_netid;
- size_t len, match_netid_len;
- char *startsep = "";
- char *endsep = "";
-
-
- /* r_netid */
- p = xdr_inline_decode(streamp, 4);
- if (unlikely(!p))
- goto out_err;
- nlen = be32_to_cpup(p++);
-
- p = xdr_inline_decode(streamp, nlen);
- if (unlikely(!p))
- goto out_err;
-
- netid = kmalloc(nlen+1, gfp_flags);
- if (unlikely(!netid))
- goto out_err;
-
- netid[nlen] = '\0';
- memcpy(netid, p, nlen);
-
- /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
- p = xdr_inline_decode(streamp, 4);
- if (unlikely(!p))
- goto out_free_netid;
- rlen = be32_to_cpup(p);
-
- p = xdr_inline_decode(streamp, rlen);
- if (unlikely(!p))
- goto out_free_netid;
-
- /* port is ".ABC.DEF", 8 chars max */
- if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
- dprintk("%s: Invalid address, length %d\n", __func__,
- rlen);
- goto out_free_netid;
- }
- buf = kmalloc(rlen + 1, gfp_flags);
- if (!buf) {
- dprintk("%s: Not enough memory\n", __func__);
- goto out_free_netid;
- }
- buf[rlen] = '\0';
- memcpy(buf, p, rlen);
-
- /* replace port '.' with '-' */
- portstr = strrchr(buf, '.');
- if (!portstr) {
- dprintk("%s: Failed finding expected dot in port\n",
- __func__);
- goto out_free_buf;
- }
- *portstr = '-';
-
- /* find '.' between address and port */
- portstr = strrchr(buf, '.');
- if (!portstr) {
- dprintk("%s: Failed finding expected dot between address and "
- "port\n", __func__);
- goto out_free_buf;
- }
- *portstr = '\0';
-
- da = kzalloc(sizeof(*da), gfp_flags);
- if (unlikely(!da))
- goto out_free_buf;
-
- INIT_LIST_HEAD(&da->da_node);
-
- if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
- sizeof(da->da_addr))) {
- dprintk("%s: error parsing address %s\n", __func__, buf);
- goto out_free_da;
- }
-
- portstr++;
- sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
- port = htons((tmp[0] << 8) | (tmp[1]));
-
- switch (da->da_addr.ss_family) {
- case AF_INET:
- ((struct sockaddr_in *)&da->da_addr)->sin_port = port;
- da->da_addrlen = sizeof(struct sockaddr_in);
- match_netid = "tcp";
- match_netid_len = 3;
- break;
-
- case AF_INET6:
- ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
- da->da_addrlen = sizeof(struct sockaddr_in6);
- match_netid = "tcp6";
- match_netid_len = 4;
- startsep = "[";
- endsep = "]";
- break;
-
- default:
- dprintk("%s: unsupported address family: %u\n",
- __func__, da->da_addr.ss_family);
- goto out_free_da;
- }
-
- if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
- dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
- __func__, netid, match_netid);
- goto out_free_da;
- }
-
- /* save human readable address */
- len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
- da->da_remotestr = kzalloc(len, gfp_flags);
-
- /* NULL is ok, only used for dprintk */
- if (da->da_remotestr)
- snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
- buf, endsep, ntohs(port));
-
- dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
- kfree(buf);
- kfree(netid);
- return da;
-
-out_free_da:
- kfree(da);
-out_free_buf:
- dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
- kfree(buf);
-out_free_netid:
- kfree(netid);
-out_err:
- return NULL;
-}
-
/* Decode opaque device data and return the result */
struct nfs4_file_layout_dsaddr *
nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
@@ -584,8 +160,8 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
mp_count = be32_to_cpup(p); /* multipath count */
for (j = 0; j < mp_count; j++) {
- da = decode_ds_addr(server->nfs_client->cl_net,
- &stream, gfp_flags);
+ da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
+ &stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
}
@@ -681,22 +257,7 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
return flseg->fh_array[i];
}
-static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
-{
- might_sleep();
- wait_on_bit_action(&ds->ds_state, NFS4DS_CONNECTING,
- nfs_wait_bit_killable, TASK_KILLABLE);
-}
-
-static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
-{
- smp_mb__before_atomic();
- clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
- smp_mb__after_atomic();
- wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
-}
-
-
+/* Upon return, either ds is connected, or ds is NULL */
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
@@ -704,29 +265,23 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs4_pnfs_ds *ret = ds;
+ struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
if (ds == NULL) {
printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
__func__, ds_idx);
- filelayout_mark_devid_invalid(devid);
+ pnfs_generic_mark_devid_invalid(devid);
goto out;
}
smp_rmb();
if (ds->ds_clp)
goto out_test_devid;
- if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
- struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
- int err;
-
- err = nfs4_ds_connect(s, ds);
- if (err)
- nfs4_mark_deviceid_unavailable(devid);
- nfs4_clear_ds_conn_bit(ds);
- } else {
- /* Either ds is connected, or ds is NULL */
- nfs4_wait_ds_connect(ds);
- }
+ nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ dataserver_retrans, 4,
+ s->nfs_client->cl_minorversion,
+ s->nfs_client->cl_rpcclient->cl_auth->au_flavor);
+
out_test_devid:
if (filelayout_test_devid_unavailable(devid))
ret = NULL;
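
With the filelayout-private data-server cache (lookup, refcounting, connect and teardown) gone, every layout driver now drives a shared cache through common helpers: nfs4_pnfs_ds_put() replaces the open-coded atomic_dec_and_lock()/destroy_ds() pair above, and nfs4_pnfs_ds_connect() subsumes the NFS4DS_CONNECTING bit dance. A driver-side usage sketch follows, with prototypes inferred from the call sites in this diff rather than quoted from the shared headers:

/* Hypothetical driver-side sketch; helper signatures are inferred
 * from the call sites above, not quoted from fs/nfs/pnfs.h.
 */
static struct nfs4_pnfs_ds *
example_get_connected_ds(struct nfs_server *server,
			 struct nfs4_deviceid_node *devid,
			 struct list_head *dsaddrs)
{
	struct nfs4_pnfs_ds *ds;

	/* Insert into (or take a reference from) the shared DS cache. */
	ds = nfs4_pnfs_ds_add(dsaddrs, GFP_NOFS);
	if (!ds)
		return NULL;

	/* Connect if needed; waits if another thread is already
	 * connecting.  Arguments: MDS server, ds, deviceid node,
	 * timeout, retrans, NFS version, minor version, auth flavor
	 * (cf. the call in nfs4_fl_prepare_ds() above).
	 */
	nfs4_pnfs_ds_connect(server, ds, devid,
			     dataserver_timeo, dataserver_retrans, 4,
			     server->nfs_client->cl_minorversion,
			     server->nfs_client->cl_rpcclient->cl_auth->au_flavor);

	smp_rmb();
	if (!ds->ds_clp) {		/* connect failed */
		nfs4_pnfs_ds_put(ds);	/* drop the cache reference */
		return NULL;
	}
	return ds;
}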
diff --git a/fs/nfs/flexfilelayout/Makefile b/fs/nfs/flexfilelayout/Makefile
new file mode 100644
index 0000000..1d2c9f6
--- /dev/null
+++ b/fs/nfs/flexfilelayout/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the pNFS Flexfile Layout Driver kernel module
+#
+obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += nfs_layout_flexfiles.o
+nfs_layout_flexfiles-y := flexfilelayout.o flexfilelayoutdev.o
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
new file mode 100644
index 0000000..c22ecaa
--- /dev/null
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -0,0 +1,1574 @@
+/*
+ * Module for pnfs flexfile layout driver.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tao Peng <bergwolf@primarydata.com>
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/module.h>
+
+#include <linux/sunrpc/metrics.h>
+#include <linux/nfs_idmap.h>
+
+#include "flexfilelayout.h"
+#include "../nfs4session.h"
+#include "../internal.h"
+#include "../delegation.h"
+#include "../nfs4trace.h"
+#include "../iostat.h"
+#include "../nfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
+
+static struct pnfs_layout_hdr *
+ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+ struct nfs4_flexfile_layout *ffl;
+
+ ffl = kzalloc(sizeof(*ffl), gfp_flags);
+ if (ffl) {
+ INIT_LIST_HEAD(&ffl->error_list);
+ return &ffl->generic_hdr;
+ } else
+ return NULL;
+}
+
+static void
+ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct nfs4_ff_layout_ds_err *err, *n;
+
+ list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
+ list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ kfree(FF_LAYOUT_FROM_HDR(lo));
+}
+
+static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
+ if (unlikely(p == NULL))
+ return -ENOBUFS;
+ memcpy(stateid, p, NFS4_STATEID_SIZE);
+ dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
+ p[0], p[1], p[2], p[3]);
+ return 0;
+}
+
+static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ memcpy(devid, p, NFS4_DEVICEID4_SIZE);
+ nfs4_print_deviceid(devid);
+ return 0;
+}
+
+static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ fh->size = be32_to_cpup(p++);
+ if (fh->size > sizeof(struct nfs_fh)) {
+ printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
+ fh->size);
+ return -EOVERFLOW;
+ }
+ /* fh.data */
+ p = xdr_inline_decode(xdr, fh->size);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ memcpy(&fh->data, p, fh->size);
+ dprintk("%s: fh len %d\n", __func__, fh->size);
+
+ return 0;
+}
+
+/*
+ * Currently only stringified uids and gids are accepted.
+ * I.e., Kerberos is not supported to the DSes, so no principals.
+ *
+ * That means that one common function will suffice, but when
+ * principals are added, this should be split to accomodate
+ * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
+ */
+static int
+decode_name(struct xdr_stream *xdr, u32 *id)
+{
+ __be32 *p;
+ int len;
+
+ /* opaque_length(4)*/
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ len = be32_to_cpup(p++);
+ if (len < 0)
+ return -EINVAL;
+
+ dprintk("%s: len %u\n", __func__, len);
+
+ /* opaque body */
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return -ENOBUFS;
+
+ if (!nfs_map_string_to_numeric((char *)p, len, id))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
+{
+ int i;
+
+ if (fls->mirror_array) {
+ for (i = 0; i < fls->mirror_array_cnt; i++) {
+			/* normally mirror_ds is freed in
+			 * .free_deviceid_node but we still do it here
+			 * for the .alloc_lseg error path */
+ if (fls->mirror_array[i]) {
+ kfree(fls->mirror_array[i]->fh_versions);
+ nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
+ kfree(fls->mirror_array[i]);
+ }
+ }
+ kfree(fls->mirror_array);
+ fls->mirror_array = NULL;
+ }
+}
+
+static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
+{
+ int ret = 0;
+
+ dprintk("--> %s\n", __func__);
+
+ /* FIXME: remove this check when layout segment support is added */
+ if (lgr->range.offset != 0 ||
+ lgr->range.length != NFS4_MAX_UINT64) {
+ dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ dprintk("--> %s returns %d\n", __func__, ret);
+ return ret;
+}
+
+static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
+{
+ if (fls) {
+ ff_layout_free_mirror_array(fls);
+ kfree(fls);
+ }
+}
+
+static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
+{
+ struct nfs4_ff_layout_mirror *tmp;
+ int i, j;
+
+ for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
+ for (j = i + 1; j < fls->mirror_array_cnt; j++)
+ if (fls->mirror_array[i]->efficiency <
+ fls->mirror_array[j]->efficiency) {
+ tmp = fls->mirror_array[i];
+ fls->mirror_array[i] = fls->mirror_array[j];
+ fls->mirror_array[j] = tmp;
+ }
+ }
+}
+
+static struct pnfs_layout_segment *
+ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
+{
+ struct pnfs_layout_segment *ret;
+ struct nfs4_ff_layout_segment *fls = NULL;
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ u64 stripe_unit;
+ u32 mirror_array_cnt;
+ __be32 *p;
+ int i, rc;
+
+ dprintk("--> %s\n", __func__);
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ return ERR_PTR(-ENOMEM);
+
+ xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
+ lgr->layoutp->len);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* stripe unit and mirror_array_cnt */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 8 + 4);
+ if (!p)
+ goto out_err_free;
+
+ p = xdr_decode_hyper(p, &stripe_unit);
+ mirror_array_cnt = be32_to_cpup(p++);
+ dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
+ stripe_unit, mirror_array_cnt);
+
+ if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
+ mirror_array_cnt == 0)
+ goto out_err_free;
+
+ rc = -ENOMEM;
+ fls = kzalloc(sizeof(*fls), gfp_flags);
+ if (!fls)
+ goto out_err_free;
+
+ fls->mirror_array_cnt = mirror_array_cnt;
+ fls->stripe_unit = stripe_unit;
+ fls->mirror_array = kcalloc(fls->mirror_array_cnt,
+ sizeof(fls->mirror_array[0]), gfp_flags);
+ if (fls->mirror_array == NULL)
+ goto out_err_free;
+
+ for (i = 0; i < fls->mirror_array_cnt; i++) {
+ struct nfs4_deviceid devid;
+ struct nfs4_deviceid_node *idnode;
+ u32 ds_count;
+ u32 fh_count;
+ int j;
+
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ ds_count = be32_to_cpup(p);
+
+ /* FIXME: allow for striping? */
+ if (ds_count != 1)
+ goto out_err_free;
+
+ fls->mirror_array[i] =
+ kzalloc(sizeof(struct nfs4_ff_layout_mirror),
+ gfp_flags);
+ if (fls->mirror_array[i] == NULL) {
+ rc = -ENOMEM;
+ goto out_err_free;
+ }
+
+ spin_lock_init(&fls->mirror_array[i]->lock);
+ fls->mirror_array[i]->ds_count = ds_count;
+
+ /* deviceid */
+ rc = decode_deviceid(&stream, &devid);
+ if (rc)
+ goto out_err_free;
+
+ idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
+ &devid, lh->plh_lc_cred,
+ gfp_flags);
+		/*
+		 * upon success, mirror_ds is allocated by a previous
+		 * getdeviceinfo, or newly by .alloc_deviceid_node;
+		 * a nfs4_find_get_deviceid failure is really a getdeviceinfo failure
+		 */
+ if (idnode)
+ fls->mirror_array[i]->mirror_ds =
+ FF_LAYOUT_MIRROR_DS(idnode);
+ else
+ goto out_err_free;
+
+ /* efficiency */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ fls->mirror_array[i]->efficiency = be32_to_cpup(p);
+
+ /* stateid */
+ rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
+ if (rc)
+ goto out_err_free;
+
+ /* fh */
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ fh_count = be32_to_cpup(p);
+
+ fls->mirror_array[i]->fh_versions =
+ kzalloc(fh_count * sizeof(struct nfs_fh),
+ gfp_flags);
+ if (fls->mirror_array[i]->fh_versions == NULL) {
+ rc = -ENOMEM;
+ goto out_err_free;
+ }
+
+ for (j = 0; j < fh_count; j++) {
+ rc = decode_nfs_fh(&stream,
+ &fls->mirror_array[i]->fh_versions[j]);
+ if (rc)
+ goto out_err_free;
+ }
+
+ fls->mirror_array[i]->fh_versions_cnt = fh_count;
+
+ /* user */
+ rc = decode_name(&stream, &fls->mirror_array[i]->uid);
+ if (rc)
+ goto out_err_free;
+
+ /* group */
+ rc = decode_name(&stream, &fls->mirror_array[i]->gid);
+ if (rc)
+ goto out_err_free;
+
+ dprintk("%s: uid %d gid %d\n", __func__,
+ fls->mirror_array[i]->uid,
+ fls->mirror_array[i]->gid);
+ }
+
+ ff_layout_sort_mirrors(fls);
+ rc = ff_layout_check_layout(lgr);
+ if (rc)
+ goto out_err_free;
+
+ ret = &fls->generic_hdr;
+ dprintk("<-- %s (success)\n", __func__);
+out_free_page:
+ __free_page(scratch);
+ return ret;
+out_err_free:
+ _ff_layout_free_lseg(fls);
+ ret = ERR_PTR(rc);
+ dprintk("<-- %s (%d)\n", __func__, rc);
+ goto out_free_page;
+}
+
+static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
+{
+ struct pnfs_layout_segment *lseg;
+
+ list_for_each_entry(lseg, &layout->plh_segs, pls_list)
+ if (lseg->pls_range.iomode == IOMODE_RW)
+ return true;
+
+ return false;
+}
+
+static void
+ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ int i;
+
+ dprintk("--> %s\n", __func__);
+
+ for (i = 0; i < fls->mirror_array_cnt; i++) {
+ if (fls->mirror_array[i]) {
+ nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
+ fls->mirror_array[i]->mirror_ds = NULL;
+ if (fls->mirror_array[i]->cred) {
+ put_rpccred(fls->mirror_array[i]->cred);
+ fls->mirror_array[i]->cred = NULL;
+ }
+ }
+ }
+
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ struct nfs4_flexfile_layout *ffl;
+ struct inode *inode;
+
+ ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
+ inode = ffl->generic_hdr.plh_inode;
+ spin_lock(&inode->i_lock);
+ if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
+ ffl->commit_info.nbuckets = 0;
+ kfree(ffl->commit_info.buckets);
+ ffl->commit_info.buckets = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ _ff_layout_free_lseg(fls);
+}
+
+/* Return 1 until we have multiple lsegs support */
+static int
+ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
+{
+ return 1;
+}
+
+static int
+ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo,
+ gfp_t gfp_flags)
+{
+ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ struct pnfs_commit_bucket *buckets;
+ int size;
+
+ if (cinfo->ds->nbuckets != 0) {
+ /* This assumes there is only one RW lseg per file.
+ * To support multiple lseg per file, we need to
+ * change struct pnfs_commit_bucket to allow dynamic
+ * increasing nbuckets.
+ */
+ return 0;
+ }
+
+ size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
+
+ buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
+ gfp_flags);
+ if (!buckets)
+ return -ENOMEM;
+ else {
+ int i;
+
+ spin_lock(cinfo->lock);
+ if (cinfo->ds->nbuckets != 0)
+ kfree(buckets);
+ else {
+ cinfo->ds->buckets = buckets;
+ cinfo->ds->nbuckets = size;
+ for (i = 0; i < size; i++) {
+ INIT_LIST_HEAD(&buckets[i].written);
+ INIT_LIST_HEAD(&buckets[i].committing);
+ /* mark direct verifier as unset */
+ buckets[i].direct_verf.committed =
+ NFS_INVALID_STABLE_HOW;
+ }
+ }
+ spin_unlock(cinfo->lock);
+ return 0;
+ }
+}
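
ff_layout_alloc_commit_info() uses a classic optimistic-allocation pattern: allocate the bucket array without the lock held (the allocation may sleep), then re-check under cinfo->lock and discard the allocation if another thread installed one first. In the abstract, it is a standalone illustration of this idiom, not code from this patch:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_state {
	spinlock_t lock;
	int *table;
};

/* Allocate outside the spinlock (kcalloc may sleep), then re-check
 * under the lock and free our copy if we lost the race.
 */
static int example_install_table(struct example_state *s, gfp_t gfp)
{
	int *table = kcalloc(16, sizeof(*table), gfp);

	if (!table)
		return -ENOMEM;
	spin_lock(&s->lock);
	if (s->table)
		kfree(table);	/* someone beat us to it; kfree won't sleep */
	else
		s->table = table;
	spin_unlock(&s->lock);
	return 0;
}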
+
+static struct nfs4_pnfs_ds *
+ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
+ int *best_idx)
+{
+ struct nfs4_ff_layout_segment *fls;
+ struct nfs4_pnfs_ds *ds;
+ int idx;
+
+ fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
+ /* mirrors are sorted by efficiency */
+ for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
+ if (ds) {
+ *best_idx = idx;
+ return ds;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ struct nfs_pgio_mirror *pgm;
+ struct nfs4_ff_layout_mirror *mirror;
+ struct nfs4_pnfs_ds *ds;
+ int ds_idx;
+
+ /* Use full layout for now */
+ if (!pgio->pg_lseg)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_READ,
+ GFP_KERNEL);
+ /* If no lseg, fall back to read through mds */
+ if (pgio->pg_lseg == NULL)
+ goto out_mds;
+
+ ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
+ if (!ds)
+ goto out_mds;
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
+
+ pgio->pg_mirror_idx = ds_idx;
+
+ /* read always uses only one mirror - idx 0 for pgio layer */
+ pgm = &pgio->pg_mirrors[0];
+ pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
+
+ return;
+out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_read_mds(pgio);
+}
+
+static void
+ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ struct nfs4_ff_layout_mirror *mirror;
+ struct nfs_pgio_mirror *pgm;
+ struct nfs_commit_info cinfo;
+ struct nfs4_pnfs_ds *ds;
+ int i;
+ int status;
+
+ if (!pgio->pg_lseg)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_RW,
+ GFP_NOFS);
+ /* If no lseg, fall back to write through mds */
+ if (pgio->pg_lseg == NULL)
+ goto out_mds;
+
+ nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
+ status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
+ if (status < 0)
+ goto out_mds;
+
+ /* Use a direct mapping of ds_idx to pgio mirror_idx */
+ if (WARN_ON_ONCE(pgio->pg_mirror_count !=
+ FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
+ goto out_mds;
+
+ for (i = 0; i < pgio->pg_mirror_count; i++) {
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
+ if (!ds)
+ goto out_mds;
+ pgm = &pgio->pg_mirrors[i];
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
+ pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
+ }
+
+ return;
+
+out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_write_mds(pgio);
+}
+
+static unsigned int
+ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ if (!pgio->pg_lseg)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_RW,
+ GFP_NOFS);
+ if (pgio->pg_lseg)
+ return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
+
+ /* no lseg means that pnfs is not in use, so no mirroring here */
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_write_mds(pgio);
+ return 1;
+}
+
+static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
+ .pg_init = ff_layout_pg_init_read,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
+};
+
+static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
+ .pg_init = ff_layout_pg_init_write,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_writepages,
+ .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
+};
+
+static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
+{
+ struct rpc_task *task = &hdr->task;
+
+ pnfs_layoutcommit_inode(hdr->inode, false);
+
+ if (retry_pnfs) {
+ dprintk("%s Reset task %5u for i/o through pNFS "
+ "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
+ hdr->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(hdr->inode),
+ hdr->args.count,
+ (unsigned long long)hdr->args.offset);
+
+ if (!hdr->dreq) {
+ struct nfs_open_context *ctx;
+
+ ctx = nfs_list_entry(hdr->pages.next)->wb_context;
+ set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
+ hdr->completion_ops->error_cleanup(&hdr->pages);
+ } else {
+ nfs_direct_set_resched_writes(hdr->dreq);
+ /* fake unstable write to let common nfs resend pages */
+ hdr->verf.committed = NFS_UNSTABLE;
+ hdr->good_bytes = 0;
+ }
+ return;
+ }
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
+ hdr->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(hdr->inode),
+ hdr->args.count,
+ (unsigned long long)hdr->args.offset);
+
+ task->tk_status = pnfs_write_done_resend_to_mds(hdr);
+ }
+}
+
+static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
+{
+ struct rpc_task *task = &hdr->task;
+
+ pnfs_layoutcommit_inode(hdr->inode, false);
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
+ hdr->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(hdr->inode),
+ hdr->args.count,
+ (unsigned long long)hdr->args.offset);
+
+ task->tk_status = pnfs_read_done_resend_to_mds(hdr);
+ }
+}
+
+static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ struct pnfs_layout_segment *lseg,
+ int idx)
+{
+ struct pnfs_layout_hdr *lo = lseg->pls_layout;
+ struct inode *inode = lo->plh_inode;
+ struct nfs_server *mds_server = NFS_SERVER(inode);
+
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs_client *mds_client = mds_server->nfs_client;
+ struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
+
+ if (task->tk_status >= 0)
+ return 0;
+
+ switch (task->tk_status) {
+ /* MDS state errors */
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ if (state == NULL)
+ break;
+ nfs_remove_bad_delegation(state->inode);
+ case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
+ if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
+ goto out_bad_stateid;
+ goto wait_on_recovery;
+ case -NFS4ERR_EXPIRED:
+ if (state != NULL) {
+ if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
+ goto out_bad_stateid;
+ }
+ nfs4_schedule_lease_recovery(mds_client);
+ goto wait_on_recovery;
+ /* DS session errors */
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session. Exchangeid "
+ "flags 0x%x\n", __func__, task->tk_status,
+ clp->cl_exchange_flags);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
+ break;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
+ break;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ break;
+ /* Invalidate Layout errors */
+ case -NFS4ERR_PNFS_NO_LAYOUT:
+ case -ESTALE: /* mapped NFS4ERR_STALE */
+ case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
+ case -EISDIR: /* mapped NFS4ERR_ISDIR */
+ case -NFS4ERR_FHEXPIRED:
+ case -NFS4ERR_WRONG_TYPE:
+ dprintk("%s Invalid layout error %d\n", __func__,
+ task->tk_status);
+ /*
+ * Destroy layout so new i/o will get a new layout.
+ * Layout will not be destroyed until all current lseg
+ * references are put. Mark layout as invalid to resend failed
+ * i/o and all i/o waiting on the slot table to the MDS until
+ * layout is destroyed and a new valid layout is obtained.
+ */
+ pnfs_destroy_layout(NFS_I(inode));
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ goto reset;
+ /* RPC connection errors */
+ case -ECONNREFUSED:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EIO:
+ case -ETIMEDOUT:
+ case -EPIPE:
+ dprintk("%s DS connection error %d\n", __func__,
+ task->tk_status);
+ nfs4_mark_deviceid_unavailable(devid);
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ /* fall through */
+ default:
+ if (ff_layout_has_available_ds(lseg))
+ return -NFS4ERR_RESET_TO_PNFS;
+reset:
+ dprintk("%s Retry through MDS. Error %d\n", __func__,
+ task->tk_status);
+ return -NFS4ERR_RESET_TO_MDS;
+ }
+out:
+ task->tk_status = 0;
+ return -EAGAIN;
+out_bad_stateid:
+ task->tk_status = -EIO;
+ return 0;
+wait_on_recovery:
+ rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
+ if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
+ rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
+ goto out;
+}
+
+/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
+static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ struct pnfs_layout_segment *lseg,
+ int idx)
+{
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+
+ if (task->tk_status >= 0)
+ return 0;
+
+ if (task->tk_status != -EJUKEBOX) {
+ dprintk("%s DS connection error %d\n", __func__,
+ task->tk_status);
+ nfs4_mark_deviceid_unavailable(devid);
+ if (ff_layout_has_available_ds(lseg))
+ return -NFS4ERR_RESET_TO_PNFS;
+ else
+ return -NFS4ERR_RESET_TO_MDS;
+ }
+
+ if (task->tk_status == -EJUKEBOX)
+ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+ task->tk_status = 0;
+ rpc_restart_call(task);
+ rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+ return -EAGAIN;
+}
+
+static int ff_layout_async_handle_error(struct rpc_task *task,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ struct pnfs_layout_segment *lseg,
+ int idx)
+{
+ int vers = clp->cl_nfs_mod->rpc_vers->number;
+
+ switch (vers) {
+ case 3:
+ return ff_layout_async_handle_error_v3(task, lseg, idx);
+ case 4:
+ return ff_layout_async_handle_error_v4(task, state, clp,
+ lseg, idx);
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+ int idx, u64 offset, u64 length,
+ u32 status, int opnum)
+{
+ struct nfs4_ff_layout_mirror *mirror;
+ int err;
+
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+ mirror, offset, length, status, opnum,
+ GFP_NOIO);
+ dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
+}
+
+/* NFS_PROTO call done callback routines */
+
+static int ff_layout_read_done_cb(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ struct inode *inode;
+ int err;
+
+ trace_nfs4_pnfs_read(hdr, task->tk_status);
+ if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
+ hdr->res.op_status = NFS4ERR_NXIO;
+ if (task->tk_status < 0 && hdr->res.op_status)
+ ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ hdr->args.offset, hdr->args.count,
+ hdr->res.op_status, OP_READ);
+ err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ hdr->ds_clp, hdr->lseg,
+ hdr->pgio_mirror_idx);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_PNFS:
+ set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &hdr->lseg->pls_layout->plh_flags);
+ pnfs_read_resend_pnfs(hdr);
+ return task->tk_status;
+ case -NFS4ERR_RESET_TO_MDS:
+ inode = hdr->lseg->pls_layout->plh_inode;
+ pnfs_error_mark_layout_for_return(inode, hdr->lseg);
+ ff_layout_reset_read(hdr);
+ return task->tk_status;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * We reference the rpc_cred of the first WRITE that triggers the need for
+ * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
+ * rfc5661 is not clear about which credential should be used.
+ *
+ * The flexfiles client should treat a FILE_SYNC reply from a DS as
+ * DATA_SYNC; to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
+ * we always send layoutcommit after DS writes.
+ */
+static void
+ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
+{
+ pnfs_set_layoutcommit(hdr);
+ dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
+ (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
+}
+
+static bool
+ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
+{
+ /* No mirroring for now */
+ struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
+
+ return ff_layout_test_devid_unavailable(node);
+}
+
+static int ff_layout_read_prepare_common(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
+ rpc_exit(task, -EIO);
+ return -EIO;
+ }
+ if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
+ dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+ if (ff_layout_has_available_ds(hdr->lseg))
+ pnfs_read_resend_pnfs(hdr);
+ else
+ ff_layout_reset_read(hdr);
+ rpc_exit(task, 0);
+ return -EAGAIN;
+ }
+ hdr->pgio_done_cb = ff_layout_read_done_cb;
+
+ return 0;
+}
+
+/*
+ * Call ops for the async read/write cases
+ * In the case of dense layouts, the offset needs to be reset to its
+ * original value.
+ */
+static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_read_prepare_common(task, hdr))
+ return;
+
+ rpc_call_start(task);
+}
+
+static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task)
+{
+ if (ds_clp->cl_session)
+ return nfs41_setup_sequence(ds_clp->cl_session,
+ args,
+ res,
+ task);
+ return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
+ args,
+ res,
+ task);
+}
+
+static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_read_prepare_common(task, hdr))
+ return;
+
+ if (ff_layout_setup_sequence(hdr->ds_clp,
+ &hdr->args.seq_args,
+ &hdr->res.seq_res,
+ task))
+ return;
+
+ if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
+ hdr->args.lock_context, FMODE_READ) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+}
+
+static void ff_layout_read_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
+ task->tk_status == 0) {
+ nfs4_sequence_done(task, &hdr->res.seq_res);
+ return;
+ }
+
+ /* Note this may cause RPC to be resent */
+ hdr->mds_ops->rpc_call_done(task, hdr);
+}
+
+static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ rpc_count_iostats_metrics(task,
+ &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
+}
+
+static int ff_layout_write_done_cb(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ struct inode *inode;
+ int err;
+
+ trace_nfs4_pnfs_write(hdr, task->tk_status);
+ if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
+ hdr->res.op_status = NFS4ERR_NXIO;
+ if (task->tk_status < 0 && hdr->res.op_status)
+ ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ hdr->args.offset, hdr->args.count,
+ hdr->res.op_status, OP_WRITE);
+ err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ hdr->ds_clp, hdr->lseg,
+ hdr->pgio_mirror_idx);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_PNFS:
+ case -NFS4ERR_RESET_TO_MDS:
+ inode = hdr->lseg->pls_layout->plh_inode;
+ pnfs_error_mark_layout_for_return(inode, hdr->lseg);
+ if (err == -NFS4ERR_RESET_TO_PNFS) {
+ pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
+ ff_layout_reset_write(hdr, true);
+ } else {
+ pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
+ ff_layout_reset_write(hdr, false);
+ }
+ return task->tk_status;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ if (hdr->res.verf->committed == NFS_FILE_SYNC ||
+ hdr->res.verf->committed == NFS_DATA_SYNC)
+ ff_layout_set_layoutcommit(hdr);
+
+ return 0;
+}
+
+static int ff_layout_commit_done_cb(struct rpc_task *task,
+ struct nfs_commit_data *data)
+{
+ struct inode *inode;
+ int err;
+
+ trace_nfs4_pnfs_commit_ds(data, task->tk_status);
+ if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
+ data->res.op_status = NFS4ERR_NXIO;
+ if (task->tk_status < 0 && data->res.op_status)
+ ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
+ data->args.offset, data->args.count,
+ data->res.op_status, OP_COMMIT);
+ err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
+ data->lseg, data->ds_commit_index);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_PNFS:
+ case -NFS4ERR_RESET_TO_MDS:
+ inode = data->lseg->pls_layout->plh_inode;
+ pnfs_error_mark_layout_for_return(inode, data->lseg);
+ if (err == -NFS4ERR_RESET_TO_PNFS)
+ pnfs_set_retry_layoutget(data->lseg->pls_layout);
+ else
+ pnfs_clear_retry_layoutget(data->lseg->pls_layout);
+ pnfs_generic_prepare_to_resend_writes(data);
+ return -EAGAIN;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ if (data->verf.committed == NFS_UNSTABLE)
+ pnfs_commit_set_layoutcommit(data);
+
+ return 0;
+}
+
+static int ff_layout_write_prepare_common(struct rpc_task *task,
+ struct nfs_pgio_header *hdr)
+{
+ if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
+ rpc_exit(task, -EIO);
+ return -EIO;
+ }
+
+ if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
+ bool retry_pnfs;
+
+ retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
+ dprintk("%s task %u reset io to %s\n", __func__,
+ task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
+ ff_layout_reset_write(hdr, retry_pnfs);
+ rpc_exit(task, 0);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_write_prepare_common(task, hdr))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (ff_layout_write_prepare_common(task, hdr))
+ return;
+
+ if (ff_layout_setup_sequence(hdr->ds_clp,
+ &hdr->args.seq_args,
+ &hdr->res.seq_res,
+ task))
+ return;
+
+ if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
+ hdr->args.lock_context, FMODE_WRITE) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+}
+
+static void ff_layout_write_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
+ task->tk_status == 0) {
+ nfs4_sequence_done(task, &hdr->res.seq_res);
+ return;
+ }
+
+ /* Note this may cause RPC to be resent */
+ hdr->mds_ops->rpc_call_done(task, hdr);
+}
+
+static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ rpc_count_iostats_metrics(task,
+ &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
+}
+
+static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
+{
+ rpc_call_start(task);
+}
+
+static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
+
+ ff_layout_setup_sequence(wdata->ds_clp,
+ &wdata->args.seq_args,
+ &wdata->res.seq_res,
+ task);
+}
+
+static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *cdata = data;
+
+ rpc_count_iostats_metrics(task,
+ &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
+}
+
+static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_read_prepare_v3,
+ .rpc_call_done = ff_layout_read_call_done,
+ .rpc_count_stats = ff_layout_read_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_read_prepare_v4,
+ .rpc_call_done = ff_layout_read_call_done,
+ .rpc_count_stats = ff_layout_read_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_write_prepare_v3,
+ .rpc_call_done = ff_layout_write_call_done,
+ .rpc_count_stats = ff_layout_write_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_write_prepare_v4,
+ .rpc_call_done = ff_layout_write_call_done,
+ .rpc_count_stats = ff_layout_write_count_stats,
+ .rpc_release = pnfs_generic_rw_release,
+};
+
+static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_commit_prepare_v3,
+ .rpc_call_done = pnfs_generic_write_commit_done,
+ .rpc_count_stats = ff_layout_commit_count_stats,
+ .rpc_release = pnfs_generic_commit_release,
+};
+
+static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_commit_prepare_v4,
+ .rpc_call_done = pnfs_generic_write_commit_done,
+ .rpc_count_stats = ff_layout_commit_count_stats,
+ .rpc_release = pnfs_generic_commit_release,
+};
+
+static enum pnfs_try_status
+ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+{
+ struct pnfs_layout_segment *lseg = hdr->lseg;
+ struct nfs4_pnfs_ds *ds;
+ struct rpc_clnt *ds_clnt;
+ struct rpc_cred *ds_cred;
+ loff_t offset = hdr->args.offset;
+ u32 idx = hdr->pgio_mirror_idx;
+ int vers;
+ struct nfs_fh *fh;
+
+ dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
+ __func__, hdr->inode->i_ino,
+ hdr->args.pgbase, (size_t)hdr->args.count, offset);
+
+ ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+ if (!ds)
+ goto out_failed;
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ hdr->inode);
+ if (IS_ERR(ds_clnt))
+ goto out_failed;
+
+ ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
+ if (IS_ERR(ds_cred))
+ goto out_failed;
+
+ vers = nfs4_ff_layout_ds_version(lseg, idx);
+
+ dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
+ ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
+
+ atomic_inc(&ds->ds_clp->cl_count);
+ hdr->ds_clp = ds->ds_clp;
+ fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
+ if (fh)
+ hdr->args.fh = fh;
+
+ /*
+ * Note that if we ever decide to split across DSes,
+ * then we may need to handle dense-like offsets.
+ */
+ hdr->args.offset = offset;
+ hdr->mds_offset = offset;
+
+ /* Perform an asynchronous read to ds */
+ nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
+ vers == 3 ? &ff_layout_read_call_ops_v3 :
+ &ff_layout_read_call_ops_v4,
+ 0, RPC_TASK_SOFTCONN);
+
+ return PNFS_ATTEMPTED;
+
+out_failed:
+ if (ff_layout_has_available_ds(lseg))
+ return PNFS_TRY_AGAIN;
+ return PNFS_NOT_ATTEMPTED;
+}
+
+/* Perform async writes. */
+static enum pnfs_try_status
+ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+{
+ struct pnfs_layout_segment *lseg = hdr->lseg;
+ struct nfs4_pnfs_ds *ds;
+ struct rpc_clnt *ds_clnt;
+ struct rpc_cred *ds_cred;
+ loff_t offset = hdr->args.offset;
+ int vers;
+ struct nfs_fh *fh;
+ int idx = hdr->pgio_mirror_idx;
+
+ ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
+ if (!ds)
+ return PNFS_NOT_ATTEMPTED;
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ hdr->inode);
+ if (IS_ERR(ds_clnt))
+ return PNFS_NOT_ATTEMPTED;
+
+ ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
+ if (IS_ERR(ds_cred))
+ return PNFS_NOT_ATTEMPTED;
+
+ vers = nfs4_ff_layout_ds_version(lseg, idx);
+
+ dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
+ __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
+ offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
+ vers);
+
+ hdr->pgio_done_cb = ff_layout_write_done_cb;
+ atomic_inc(&ds->ds_clp->cl_count);
+ hdr->ds_clp = ds->ds_clp;
+ hdr->ds_commit_idx = idx;
+ fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
+ if (fh)
+ hdr->args.fh = fh;
+
+ /*
+ * Note that if we ever decide to split across DSes,
+ * then we may need to handle dense-like offsets.
+ */
+ hdr->args.offset = offset;
+
+ /* Perform an asynchronous write */
+ nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
+ vers == 3 ? &ff_layout_write_call_ops_v3 :
+ &ff_layout_write_call_ops_v4,
+ sync, RPC_TASK_SOFTCONN);
+ return PNFS_ATTEMPTED;
+}
+
+static void
+ff_layout_mark_request_commit(struct nfs_page *req,
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx)
+{
+ struct list_head *list;
+ struct pnfs_commit_bucket *buckets;
+
+ spin_lock(cinfo->lock);
+ buckets = cinfo->ds->buckets;
+ list = &buckets[ds_commit_idx].written;
+ if (list_empty(list)) {
+ /* Non-empty buckets hold a reference on the lseg. That ref
+ * is normally transferred to the COMMIT call and released
+ * there. It could also be released if the last req is pulled
+ * off due to a rewrite, in which case it will be done in
+ * pnfs_common_clear_request_commit
+ */
+ WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
+ buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
+ }
+ set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ cinfo->ds->nwritten++;
+
+	/* This duplicates nfs_request_add_commit_list(): we need to add
+	 * req to the list without dropping the cinfo lock.
+	 */
+ set_bit(PG_CLEAN, &(req)->wb_flags);
+ nfs_list_add_request(req, list);
+ cinfo->mds->ncommit++;
+ spin_unlock(cinfo->lock);
+ if (!cinfo->dreq) {
+ inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
+ BDI_RECLAIMABLE);
+ __mark_inode_dirty(req->wb_context->dentry->d_inode,
+ I_DIRTY_DATASYNC);
+ }
+}
+
+static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ return i;
+}
+
+static struct nfs_fh *
+select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
+
+ /* FIXME: Assume that there is only one NFS version available
+ * for the DS.
+ */
+ return &flseg->mirror_array[i]->fh_versions[0];
+}
+
+static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ struct rpc_clnt *ds_clnt;
+ struct rpc_cred *ds_cred;
+ u32 idx;
+ int vers;
+ struct nfs_fh *fh;
+
+ idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
+ if (!ds)
+ goto out_err;
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ data->inode);
+ if (IS_ERR(ds_clnt))
+ goto out_err;
+
+ ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
+ if (IS_ERR(ds_cred))
+ goto out_err;
+
+ vers = nfs4_ff_layout_ds_version(lseg, idx);
+
+ dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
+ data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
+ vers);
+ data->commit_done_cb = ff_layout_commit_done_cb;
+ data->cred = ds_cred;
+ atomic_inc(&ds->ds_clp->cl_count);
+ data->ds_clp = ds->ds_clp;
+ fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
+ if (fh)
+ data->args.fh = fh;
+ return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
+ vers == 3 ? &ff_layout_commit_call_ops_v3 :
+ &ff_layout_commit_call_ops_v4,
+ how, RPC_TASK_SOFTCONN);
+out_err:
+ pnfs_generic_prepare_to_resend_writes(data);
+ pnfs_generic_commit_release(data);
+ return -EAGAIN;
+}
+
+static int
+ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ int how, struct nfs_commit_info *cinfo)
+{
+ return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
+ ff_layout_initiate_commit);
+}
+
+static struct pnfs_ds_commit_info *
+ff_layout_get_ds_info(struct inode *inode)
+{
+ struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
+
+ if (layout == NULL)
+ return NULL;
+
+ return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
+}
+
+static void
+ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
+{
+ nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
+ id_node));
+}
+
+static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
+ __be32 *start;
+ int count = 0, ret = 0;
+
+ start = xdr_reserve_space(xdr, 4);
+ if (unlikely(!start))
+ return -E2BIG;
+
+	/* This assumes we always return _ALL_ layouts */
+ spin_lock(&hdr->plh_inode->i_lock);
+ ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
+ spin_unlock(&hdr->plh_inode->i_lock);
+
+ *start = cpu_to_be32(count);
+
+ return ret;
+}
+
+/* report nothing for now */
+static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ if (likely(p))
+ *p = cpu_to_be32(0);
+}
+
+static struct nfs4_deviceid_node *
+ff_layout_alloc_deviceid_node(struct nfs_server *server,
+ struct pnfs_device *pdev, gfp_t gfp_flags)
+{
+ struct nfs4_ff_layout_ds *dsaddr;
+
+ dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
+ if (!dsaddr)
+ return NULL;
+ return &dsaddr->id_node;
+}
+
+static void
+ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
+ __be32 *start;
+
+ dprintk("%s: Begin\n", __func__);
+ start = xdr_reserve_space(xdr, 4);
+ BUG_ON(!start);
+
+ if (ff_layout_encode_ioerr(flo, xdr, args))
+ goto out;
+
+ ff_layout_encode_iostats(flo, xdr, args);
+out:
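+ /* Backfill the opaque length: 4-byte words written past the length slot */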
+ *start = cpu_to_be32((xdr->p - start - 1) * 4);
+ dprintk("%s: Return\n", __func__);
+}
+
+static struct pnfs_layoutdriver_type flexfilelayout_type = {
+ .id = LAYOUT_FLEX_FILES,
+ .name = "LAYOUT_FLEX_FILES",
+ .owner = THIS_MODULE,
+ .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
+ .free_layout_hdr = ff_layout_free_layout_hdr,
+ .alloc_lseg = ff_layout_alloc_lseg,
+ .free_lseg = ff_layout_free_lseg,
+ .pg_read_ops = &ff_layout_pg_read_ops,
+ .pg_write_ops = &ff_layout_pg_write_ops,
+ .get_ds_info = ff_layout_get_ds_info,
+ .free_deviceid_node = ff_layout_free_deviceid_node,
+ .mark_request_commit = ff_layout_mark_request_commit,
+ .clear_request_commit = pnfs_generic_clear_request_commit,
+ .scan_commit_lists = pnfs_generic_scan_commit_lists,
+ .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
+ .commit_pagelist = ff_layout_commit_pagelist,
+ .read_pagelist = ff_layout_read_pagelist,
+ .write_pagelist = ff_layout_write_pagelist,
+ .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
+ .encode_layoutreturn = ff_layout_encode_layoutreturn,
+};
+
+static int __init nfs4flexfilelayout_init(void)
+{
+ printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
+ __func__);
+ return pnfs_register_layoutdriver(&flexfilelayout_type);
+}
+
+static void __exit nfs4flexfilelayout_exit(void)
+{
+ printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
+ __func__);
+ pnfs_unregister_layoutdriver(&flexfilelayout_type);
+}
+
+MODULE_ALIAS("nfs-layouttype4-4");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
+
+module_init(nfs4flexfilelayout_init);
+module_exit(nfs4flexfilelayout_exit);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
new file mode 100644
index 0000000..070f204
--- /dev/null
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -0,0 +1,155 @@
+/*
+ * NFSv4 flexfile layout driver data structures.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tao Peng <bergwolf@primarydata.com>
+ */
+
+#ifndef FS_NFS_NFS4FLEXFILELAYOUT_H
+#define FS_NFS_NFS4FLEXFILELAYOUT_H
+
+#include "../pnfs.h"
+
+/* XXX: Filter out insanely large mirror counts for now, to avoid OOM
+ * due to network errors etc. */
+#define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096
+
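+/* One NFS protocol version/minor a DS speaks, as advertised by the MDS */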
+struct nfs4_ff_ds_version {
+ u32 version;
+ u32 minor_version;
+ u32 rsize;
+ u32 wsize;
+ bool tightly_coupled;
+};
+
+/* chained in global deviceid hlist */
+struct nfs4_ff_layout_ds {
+ struct nfs4_deviceid_node id_node;
+ u32 ds_versions_cnt;
+ struct nfs4_ff_ds_version *ds_versions;
+ struct nfs4_pnfs_ds *ds;
+};
+
+struct nfs4_ff_layout_ds_err {
+ struct list_head list; /* linked in mirror error_list */
+ u64 offset;
+ u64 length;
+ int status;
+ enum nfs_opnum4 opnum;
+ nfs4_stateid stateid;
+ struct nfs4_deviceid deviceid;
+};
+
+struct nfs4_ff_layout_mirror {
+ u32 ds_count;
+ u32 efficiency;
+ struct nfs4_ff_layout_ds *mirror_ds;
+ u32 fh_versions_cnt;
+ struct nfs_fh *fh_versions;
+ nfs4_stateid stateid;
+ struct nfs4_string user_name;
+ struct nfs4_string group_name;
+ u32 uid;
+ u32 gid;
+ struct rpc_cred *cred;
+ spinlock_t lock;
+};
+
+struct nfs4_ff_layout_segment {
+ struct pnfs_layout_segment generic_hdr;
+ u64 stripe_unit;
+ u32 mirror_array_cnt;
+ struct nfs4_ff_layout_mirror **mirror_array;
+};
+
+struct nfs4_flexfile_layout {
+ struct pnfs_layout_hdr generic_hdr;
+ struct pnfs_ds_commit_info commit_info;
+ struct list_head error_list; /* nfs4_ff_layout_ds_err */
+};
+
+static inline struct nfs4_flexfile_layout *
+FF_LAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct nfs4_flexfile_layout, generic_hdr);
+}
+
+static inline struct nfs4_ff_layout_segment *
+FF_LAYOUT_LSEG(struct pnfs_layout_segment *lseg)
+{
+ return container_of(lseg,
+ struct nfs4_ff_layout_segment,
+ generic_hdr);
+}
+
+static inline struct nfs4_deviceid_node *
+FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
+{
+ if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt ||
+ FF_LAYOUT_LSEG(lseg)->mirror_array[idx] == NULL ||
+ FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds == NULL)
+ return NULL;
+ return &FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds->id_node;
+}
+
+static inline struct nfs4_ff_layout_ds *
+FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node)
+{
+ return container_of(node, struct nfs4_ff_layout_ds, id_node);
+}
+
+static inline struct nfs4_ff_layout_mirror *
+FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)
+{
+ if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt)
+ return NULL;
+ return FF_LAYOUT_LSEG(lseg)->mirror_array[idx];
+}
+
+static inline u32
+FF_LAYOUT_MIRROR_COUNT(struct pnfs_layout_segment *lseg)
+{
+ return FF_LAYOUT_LSEG(lseg)->mirror_array_cnt;
+}
+
+static inline bool
+ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
+{
+ return nfs4_test_deviceid_unavailable(node);
+}
+
+static inline int
+nfs4_ff_layout_ds_version(struct pnfs_layout_segment *lseg, u32 ds_idx)
+{
+ return FF_LAYOUT_COMP(lseg, ds_idx)->mirror_ds->ds_versions[0].version;
+}
+
+struct nfs4_ff_layout_ds *
+nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ gfp_t gfp_flags);
+void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
+void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
+int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
+ struct nfs4_ff_layout_mirror *mirror, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ gfp_t gfp_flags);
+int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr, int *count,
+ const struct pnfs_layout_range *range);
+struct nfs_fh *
+nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
+
+struct nfs4_pnfs_ds *
+nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ bool fail_return);
+
+struct rpc_clnt *
+nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg,
+ u32 ds_idx,
+ struct nfs_client *ds_clp,
+ struct inode *inode);
+struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
+ u32 ds_idx, struct rpc_cred *mdscred);
+bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+#endif /* FS_NFS_NFS4FLEXFILELAYOUT_H */
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
new file mode 100644
index 0000000..e2c01f2
--- /dev/null
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -0,0 +1,552 @@
+/*
+ * Device operations for the pnfs nfs4 file layout driver.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tao Peng <bergwolf@primarydata.com>
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/sunrpc/addr.h>
+
+#include "../internal.h"
+#include "../nfs4session.h"
+#include "flexfilelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
+static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+
+void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
+{
+ if (mirror_ds)
+ nfs4_put_deviceid_node(&mirror_ds->id_node);
+}
+
+void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
+{
+ nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
+ nfs4_pnfs_ds_put(mirror_ds->ds);
+ kfree(mirror_ds);
+}
+
+/* Decode opaque device data and construct new_ds using it */
+struct nfs4_ff_layout_ds *
+nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ gfp_t gfp_flags)
+{
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ struct list_head dsaddrs;
+ struct nfs4_pnfs_ds_addr *da;
+ struct nfs4_ff_layout_ds *new_ds = NULL;
+ struct nfs4_ff_ds_version *ds_versions = NULL;
+ u32 mp_count;
+ u32 version_count;
+ __be32 *p;
+ int i, ret = -ENOMEM;
+
+ /* set up xdr stream */
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ goto out_err;
+
+ new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
+ if (!new_ds)
+ goto out_scratch;
+
+ nfs4_init_deviceid_node(&new_ds->id_node,
+ server,
+ &pdev->dev_id);
+ INIT_LIST_HEAD(&dsaddrs);
+
+ xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* multipath count */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_drain_dsaddrs;
+ mp_count = be32_to_cpup(p);
+ dprintk("%s: multipath ds count %d\n", __func__, mp_count);
+
+ for (i = 0; i < mp_count; i++) {
+ /* multipath ds */
+ da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
+ &stream, gfp_flags);
+ if (da)
+ list_add_tail(&da->da_node, &dsaddrs);
+ }
+ if (list_empty(&dsaddrs)) {
+ dprintk("%s: no suitable DS addresses found\n",
+ __func__);
+ ret = -ENOMEDIUM;
+ goto out_err_drain_dsaddrs;
+ }
+
+ /* version count */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_drain_dsaddrs;
+ version_count = be32_to_cpup(p);
+ dprintk("%s: version count %d\n", __func__, version_count);
+
+ ds_versions = kcalloc(version_count, sizeof(struct nfs4_ff_ds_version),
+ gfp_flags);
+ if (!ds_versions)
+ goto out_err_drain_dsaddrs;
+
+ for (i = 0; i < version_count; i++) {
+ /* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
+ * tightly_coupled(4) */
+ p = xdr_inline_decode(&stream, 20);
+ if (unlikely(!p))
+ goto out_err_drain_dsaddrs;
+ ds_versions[i].version = be32_to_cpup(p++);
+ ds_versions[i].minor_version = be32_to_cpup(p++);
+ ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
+ ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
+ ds_versions[i].tightly_coupled = be32_to_cpup(p);
+
+ if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
+ ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
+ if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
+ ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;
+
+ if (ds_versions[i].version != 3 || ds_versions[i].minor_version != 0) {
+ dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
+ i, ds_versions[i].version,
+ ds_versions[i].minor_version);
+ ret = -EPROTONOSUPPORT;
+ goto out_err_drain_dsaddrs;
+ }
+
+ dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
+ __func__, i, ds_versions[i].version,
+ ds_versions[i].minor_version,
+ ds_versions[i].rsize,
+ ds_versions[i].wsize,
+ ds_versions[i].tightly_coupled);
+ }
+
+ new_ds->ds_versions = ds_versions;
+ new_ds->ds_versions_cnt = version_count;
+
+ new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
+ if (!new_ds->ds)
+ goto out_err_drain_dsaddrs;
+
+ /* If DS was already in cache, free ds addrs */
+ while (!list_empty(&dsaddrs)) {
+ da = list_first_entry(&dsaddrs,
+ struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+
+ __free_page(scratch);
+ return new_ds;
+
+out_err_drain_dsaddrs:
+ while (!list_empty(&dsaddrs)) {
+ da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+
+ kfree(ds_versions);
+out_scratch:
+ __free_page(scratch);
+out_err:
+ kfree(new_ds);
+
+ dprintk("%s ERROR: returning %d\n", __func__, ret);
+ return NULL;
+}
+
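+/* start + len, clamped to NFS4_MAX_UINT64 on overflow (i.e. "to EOF") */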
+static u64
+end_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ end = start + len;
+ return end >= start ? end : NFS4_MAX_UINT64;
+}
+
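+/* Grow @err to cover the union of its range and [offset, offset + length) */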
+static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
+ u64 offset, u64 length)
+{
+ u64 end;
+
+ end = max_t(u64, end_offset(err->offset, err->length),
+ end_offset(offset, length));
+ err->offset = min_t(u64, err->offset, offset);
+ err->length = end - err->offset;
+}
+
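+/* Errors merge only when status, opnum, stateid and deviceid all match
+ * and the byte ranges overlap or touch. */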
+static bool ds_error_can_merge(struct nfs4_ff_layout_ds_err *err, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ nfs4_stateid *stateid,
+ struct nfs4_deviceid *deviceid)
+{
+ return err->status == status && err->opnum == opnum &&
+ nfs4_stateid_match(&err->stateid, stateid) &&
+ !memcmp(&err->deviceid, deviceid, sizeof(*deviceid)) &&
+ end_offset(err->offset, err->length) >= offset &&
+ err->offset <= end_offset(offset, length);
+}
+
+static bool merge_ds_error(struct nfs4_ff_layout_ds_err *old,
+ struct nfs4_ff_layout_ds_err *new)
+{
+ if (!ds_error_can_merge(old, new->offset, new->length, new->status,
+ new->opnum, &new->stateid, &new->deviceid))
+ return false;
+
+ extend_ds_error(old, new->offset, new->length);
+ return true;
+}
+
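+/* Returns true if @dserr was merged into an existing entry (caller frees it) */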
+static bool
+ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
+ struct nfs4_ff_layout_ds_err *dserr)
+{
+ struct nfs4_ff_layout_ds_err *err;
+
+ list_for_each_entry(err, &flo->error_list, list) {
+ if (merge_ds_error(err, dserr))
+ return true;
+ }
+
+ list_add(&dserr->list, &flo->error_list);
+ return false;
+}
+
+static bool
+ff_layout_update_ds_error(struct nfs4_flexfile_layout *flo, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ nfs4_stateid *stateid, struct nfs4_deviceid *deviceid)
+{
+ bool found = false;
+ struct nfs4_ff_layout_ds_err *err;
+
+ list_for_each_entry(err, &flo->error_list, list) {
+ if (ds_error_can_merge(err, offset, length, status, opnum,
+ stateid, deviceid)) {
+ found = true;
+ extend_ds_error(err, offset, length);
+ break;
+ }
+ }
+
+ return found;
+}
+
+int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
+ struct nfs4_ff_layout_mirror *mirror, u64 offset,
+ u64 length, int status, enum nfs_opnum4 opnum,
+ gfp_t gfp_flags)
+{
+ struct nfs4_ff_layout_ds_err *dserr;
+ bool needfree;
+
+ if (status == 0)
+ return 0;
+
+ if (mirror->mirror_ds == NULL)
+ return -EINVAL;
+
+ spin_lock(&flo->generic_hdr.plh_inode->i_lock);
+ if (ff_layout_update_ds_error(flo, offset, length, status, opnum,
+ &mirror->stateid,
+ &mirror->mirror_ds->id_node.deviceid)) {
+ spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
+ return 0;
+ }
+ spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
+ dserr = kmalloc(sizeof(*dserr), gfp_flags);
+ if (!dserr)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dserr->list);
+ dserr->offset = offset;
+ dserr->length = length;
+ dserr->status = status;
+ dserr->opnum = opnum;
+ nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
+ memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
+ NFS4_DEVICEID4_SIZE);
+
+ spin_lock(&flo->generic_hdr.plh_inode->i_lock);
+ needfree = ff_layout_add_ds_error_locked(flo, dserr);
+ spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
+ if (needfree)
+ kfree(dserr);
+
+ return 0;
+}
+
+/* currently we only support AUTH_NONE and AUTH_SYS */
+static rpc_authflavor_t
+nfs4_ff_layout_choose_authflavor(struct nfs4_ff_layout_mirror *mirror)
+{
+ if (mirror->uid == (u32)-1)
+ return RPC_AUTH_NULL;
+ return RPC_AUTH_UNIX;
+}
+
+/* fetch cred for NFSv3 DS */
+static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
+ struct nfs4_pnfs_ds *ds)
+{
+ if (ds->ds_clp && !mirror->cred &&
+ mirror->mirror_ds->ds_versions[0].version == 3) {
+ struct rpc_auth *auth = ds->ds_clp->cl_rpcclient->cl_auth;
+ struct rpc_cred *cred;
+ struct auth_cred acred = {
+ .uid = make_kuid(&init_user_ns, mirror->uid),
+ .gid = make_kgid(&init_user_ns, mirror->gid),
+ };
+
+ /* AUTH_NULL ignores acred */
+ cred = auth->au_ops->lookup_cred(auth, &acred, 0);
+ if (IS_ERR(cred)) {
+ dprintk("%s: lookup_cred failed with %ld\n",
+ __func__, PTR_ERR(cred));
+ return PTR_ERR(cred);
+ }
+ mirror->cred = cred;
+ }
+ return 0;
+}
+
+struct nfs_fh *
+nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
+ struct nfs_fh *fh = NULL;
+ struct nfs4_deviceid_node *devid;
+
+ if (mirror == NULL || mirror->mirror_ds == NULL ||
+ mirror->mirror_ds->ds == NULL) {
+ printk(KERN_ERR "NFS: %s: No data server for mirror offset index %d\n",
+ __func__, mirror_idx);
+ if (mirror && mirror->mirror_ds) {
+ devid = &mirror->mirror_ds->id_node;
+ pnfs_generic_mark_devid_invalid(devid);
+ }
+ goto out;
+ }
+
+ /* FIXME: For now assume there is only 1 version available for the DS */
+ fh = &mirror->fh_versions[0];
+out:
+ return fh;
+}
+
+/* Upon return, either ds is connected, or ds is NULL */
+struct nfs4_pnfs_ds *
+nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ bool fail_return)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
+ struct nfs4_pnfs_ds *ds = NULL;
+ struct nfs4_deviceid_node *devid;
+ struct inode *ino = lseg->pls_layout->plh_inode;
+ struct nfs_server *s = NFS_SERVER(ino);
+ unsigned int max_payload;
+ rpc_authflavor_t flavor;
+
+ if (mirror == NULL || mirror->mirror_ds == NULL ||
+ mirror->mirror_ds->ds == NULL) {
+ printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
+ __func__, ds_idx);
+ if (mirror && mirror->mirror_ds) {
+ devid = &mirror->mirror_ds->id_node;
+ pnfs_generic_mark_devid_invalid(devid);
+ }
+ goto out;
+ }
+
+ devid = &mirror->mirror_ds->id_node;
+ if (ff_layout_test_devid_unavailable(devid))
+ goto out;
+
+ ds = mirror->mirror_ds->ds;
+ /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
+ smp_rmb();
+ if (ds->ds_clp)
+ goto out;
+
+ flavor = nfs4_ff_layout_choose_authflavor(mirror);
+
+ /* FIXME: For now we assume the server sent only one version of NFS
+ * to use for the DS.
+ */
+ nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ dataserver_retrans,
+ mirror->mirror_ds->ds_versions[0].version,
+ mirror->mirror_ds->ds_versions[0].minor_version,
+ flavor);
+
+ /* Connection succeeded: clamp rsize/wsize to the transport's maximum */
+ if (ds->ds_clp) {
+ max_payload =
+ nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
+ NULL);
+ if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
+ mirror->mirror_ds->ds_versions[0].rsize = max_payload;
+ if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
+ mirror->mirror_ds->ds_versions[0].wsize = max_payload;
+ } else {
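+ /* Connection failed: track the error and arrange to return the layout */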
+ ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+ mirror, lseg->pls_range.offset,
+ lseg->pls_range.length, NFS4ERR_NXIO,
+ OP_ILLEGAL, GFP_NOIO);
+ if (fail_return) {
+ pnfs_error_mark_layout_for_return(ino, lseg);
+ if (ff_layout_has_available_ds(lseg))
+ pnfs_set_retry_layoutget(lseg->pls_layout);
+ else
+ pnfs_clear_retry_layoutget(lseg->pls_layout);
+
+ } else {
+ if (ff_layout_has_available_ds(lseg))
+ set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lseg->pls_layout->plh_flags);
+ else {
+ pnfs_error_mark_layout_for_return(ino, lseg);
+ pnfs_clear_retry_layoutget(lseg->pls_layout);
+ }
+ }
+ }
+
+ if (ff_layout_update_mirror_cred(mirror, ds))
+ ds = NULL;
+out:
+ return ds;
+}
+
+struct rpc_cred *
+ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ struct rpc_cred *mdscred)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
+ struct rpc_cred *cred = ERR_PTR(-EINVAL);
+
+ if (!nfs4_ff_layout_prepare_ds(lseg, ds_idx, true))
+ goto out;
+
+ if (mirror && mirror->cred)
+ cred = mirror->cred;
+ else
+ cred = mdscred;
+out:
+ return cred;
+}
+
+/**
+ * Find or create a DS rpc client with the MDS server rpc client auth flavor
+ * in the nfs_client cl_ds_clients list.
+ */
+struct rpc_clnt *
+nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ struct nfs_client *ds_clp, struct inode *inode)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
+
+ switch (mirror->mirror_ds->ds_versions[0].version) {
+ case 3:
+ /* For NFSv3 DS, flavor is set when creating DS connections */
+ return ds_clp->cl_rpcclient;
+ case 4:
+ return nfs4_find_or_create_ds_client(ds_clp, inode);
+ default:
+ BUG();
+ }
+}
+
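+/* An end that clamps to NFS4_MAX_UINT64 means "to EOF" and extends past
+ * any offset. */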
+static bool is_range_intersecting(u64 offset1, u64 length1,
+ u64 offset2, u64 length2)
+{
+ u64 end1 = end_offset(offset1, length1);
+ u64 end2 = end_offset(offset2, length2);
+
+ return (end1 == NFS4_MAX_UINT64 || end1 > offset2) &&
+ (end2 == NFS4_MAX_UINT64 || end2 > offset1);
+}
+
+/* called with inode i_lock held */
+int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
+ struct xdr_stream *xdr, int *count,
+ const struct pnfs_layout_range *range)
+{
+ struct nfs4_ff_layout_ds_err *err, *n;
+ __be32 *p;
+
+ list_for_each_entry_safe(err, n, &flo->error_list, list) {
+ if (!is_range_intersecting(err->offset, err->length,
+ range->offset, range->length))
+ continue;
+ /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
+ * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4)
+ */
+ p = xdr_reserve_space(xdr,
+ 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
+ if (unlikely(!p))
+ return -ENOBUFS;
+ p = xdr_encode_hyper(p, err->offset);
+ p = xdr_encode_hyper(p, err->length);
+ p = xdr_encode_opaque_fixed(p, &err->stateid,
+ NFS4_STATEID_SIZE);
+ p = xdr_encode_opaque_fixed(p, &err->deviceid,
+ NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(err->status);
+ *p++ = cpu_to_be32(err->opnum);
+ *count += 1;
+ list_del(&err->list);
+ dprintk("%s: offset %llu length %llu status %d op %d count %d\n",
+ __func__, err->offset, err->length, err->status,
+ err->opnum, *count);
+ kfree(err);
+ }
+
+ return 0;
+}
+
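+/* The segment is usable while at least one mirror's DS is still reachable */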
+bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_ff_layout_mirror *mirror;
+ struct nfs4_deviceid_node *devid;
+ int idx;
+
+ for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ if (mirror && mirror->mirror_ds) {
+ devid = &mirror->mirror_ds->id_node;
+ if (!ff_layout_test_devid_unavailable(devid))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+module_param(dataserver_retrans, uint, 0644);
+MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
+ "retries a request before it attempts further "
+ "recovery action.");
+module_param(dataserver_timeo, uint, 0644);
+MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
+ "NFSv4.1 client waits for a response from a "
+ "data server before it retries an NFS request.");
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 2f5db84..857e2a9 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -152,7 +152,7 @@ void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *f
nfs_fattr_free_group_name(fattr);
}
-static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
+int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
{
unsigned long val;
char buf[16];
@@ -166,6 +166,7 @@ static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *re
*res = val;
return 1;
}
+EXPORT_SYMBOL_GPL(nfs_map_string_to_numeric);
static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 24aac72..e4f0dce 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
nfs_attr_check_mountpoint(sb, fattr);
- if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
- !nfs_attr_use_mounted_on_fileid(fattr))
+ if (nfs_attr_use_mounted_on_fileid(fattr))
+ fattr->fileid = fattr->mounted_on_fileid;
+ else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
goto out_no_inode;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
goto out_no_inode;
@@ -505,10 +506,15 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_valid &= ~ATTR_MODE;
if (attr->ia_valid & ATTR_SIZE) {
+ loff_t i_size;
+
BUG_ON(!S_ISREG(inode->i_mode));
- if (attr->ia_size == i_size_read(inode))
+ i_size = i_size_read(inode);
+ if (attr->ia_size == i_size)
attr->ia_valid &= ~ATTR_SIZE;
+ else if (attr->ia_size < i_size && IS_SWAPFILE(inode))
+ return -ETXTBSY;
}
/* Optimization: if the end result is no change, don't RPC */
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index f519d41..212b8c8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -6,6 +6,7 @@
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/crc32.h>
+#include <linux/nfs_page.h>
#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
@@ -31,8 +32,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
(((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
return 0;
-
- fattr->fileid = fattr->mounted_on_fileid;
return 1;
}
@@ -189,9 +188,15 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
const struct sockaddr *ds_addr,
int ds_addrlen, int ds_proto,
unsigned int ds_timeo,
- unsigned int ds_retrans);
+ unsigned int ds_retrans,
+ u32 minor_version,
+ rpc_authflavor_t au_flavor);
extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *,
struct inode *);
+extern struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp,
+ const struct sockaddr *ds_addr, int ds_addrlen,
+ int ds_proto, unsigned int ds_timeo,
+ unsigned int ds_retrans, rpc_authflavor_t au_flavor);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -244,9 +249,12 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
-int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_header *,
- const struct rpc_call_ops *, int, int);
+int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+ struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
+ const struct rpc_call_ops *call_ops, int how, int flags);
void nfs_free_request(struct nfs_page *req);
+struct nfs_pgio_mirror *
+nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
static inline void nfs_iocounter_init(struct nfs_io_counter *c)
{
@@ -254,6 +262,12 @@ static inline void nfs_iocounter_init(struct nfs_io_counter *c)
atomic_set(&c->io_count, 0);
}
+static inline bool nfs_pgio_has_mirroring(struct nfs_pageio_descriptor *desc)
+{
+ WARN_ON_ONCE(desc->pg_mirror_count < 1);
+ return desc->pg_mirror_count > 1;
+}
+
/* nfs2xdr.c */
extern struct rpc_procinfo nfs_procedures[];
extern int nfs2_decode_dirent(struct xdr_stream *,
@@ -377,7 +391,7 @@ extern struct rpc_stat nfs_rpcstat;
extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
-extern void nfs_sb_active(struct super_block *sb);
+extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
/* namespace.c */
@@ -428,6 +442,7 @@ extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
extern int nfs_initiate_commit(struct rpc_clnt *clnt,
struct nfs_commit_data *data,
+ const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
int how, int flags);
extern void nfs_init_commit(struct nfs_commit_data *data,
@@ -441,13 +456,15 @@ int nfs_scan_commit(struct inode *inode, struct list_head *dst,
struct nfs_commit_info *cinfo);
void nfs_mark_request_commit(struct nfs_page *req,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo);
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx);
int nfs_write_need_commit(struct nfs_pgio_header *);
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
int how, struct nfs_commit_info *cinfo);
void nfs_retry_commit(struct list_head *page_list,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo);
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx);
void nfs_commitdata_release(struct nfs_commit_data *data);
void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
struct nfs_commit_info *cinfo);
@@ -458,6 +475,7 @@ void nfs_init_cinfo(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq);
int nfs_key_timeout_notify(struct file *filp, struct inode *inode);
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx);
+void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio);
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
@@ -481,6 +499,7 @@ static inline void nfs_inode_dio_wait(struct inode *inode)
inode_dio_wait(inode);
}
extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
+extern void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq);
/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_pgio_header *);
@@ -494,6 +513,26 @@ extern int nfs41_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
struct rpc_cred *cred);
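+/* Take an inode reference and pin its superblock active; on failure, neither */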
+static inline struct inode *nfs_igrab_and_active(struct inode *inode)
+{
+ inode = igrab(inode);
+ if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
+ iput(inode);
+ inode = NULL;
+ }
+ return inode;
+}
+
+static inline void nfs_iput_and_deactive(struct inode *inode)
+{
+ if (inode != NULL) {
+ struct super_block *sb = inode->i_sb;
+
+ iput(inode);
+ nfs_sb_deactive(sb);
+ }
+}
+
/*
* Determine the device name as a string
*/
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5f61b83..b4e03ed 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -481,7 +481,8 @@ out_overflow:
* void;
* };
*/
-static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
+static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result,
+ __u32 *op_status)
{
enum nfs_stat status;
int error;
@@ -489,6 +490,8 @@ static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
error = decode_stat(xdr, &status);
if (unlikely(error))
goto out;
+ if (op_status)
+ *op_status = status;
if (status != NFS_OK)
goto out_default;
error = decode_fattr(xdr, result);
@@ -808,7 +811,7 @@ out_default:
static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_fattr *result)
{
- return decode_attrstat(xdr, result);
+ return decode_attrstat(xdr, result, NULL);
}
static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -865,6 +868,7 @@ static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
error = decode_stat(xdr, &status);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS_OK)
goto out_default;
error = decode_fattr(xdr, result->fattr);
@@ -882,7 +886,7 @@ static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
{
/* All NFSv2 writes are "file sync" writes */
result->verf->committed = NFS_FILE_SYNC;
- return decode_attrstat(xdr, result->fattr);
+ return decode_attrstat(xdr, result->fattr, &result->op_status);
}
/**
diff --git a/fs/nfs/nfs3_fs.h b/fs/nfs/nfs3_fs.h
index 333ae40..e134d65 100644
--- a/fs/nfs/nfs3_fs.h
+++ b/fs/nfs/nfs3_fs.h
@@ -30,5 +30,7 @@ struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subver
struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
+/* nfs3super.c */
+extern struct nfs_subversion nfs_v3;
#endif /* __LINUX_FS_NFS_NFS3_FS_H */
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 8c1b437..9e9fa34 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -1,5 +1,6 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
+#include <linux/sunrpc/addr.h>
#include "internal.h"
#include "nfs3_fs.h"
@@ -64,3 +65,43 @@ struct nfs_server *nfs3_clone_server(struct nfs_server *source,
nfs_init_server_aclclient(server);
return server;
}
+
+/*
+ * Set up a pNFS Data Server client over NFSv3.
+ *
+ * Return any existing nfs_client that matches the server address, port,
+ * version and minorversion.
+ *
+ * For a new nfs_client, use a soft mount (default), a low retrans and a
+ * low timeout interval so that if a connection is lost, we retry through
+ * the MDS.
+ */
+struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp,
+ const struct sockaddr *ds_addr, int ds_addrlen,
+ int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
+ rpc_authflavor_t au_flavor)
+{
+ struct nfs_client_initdata cl_init = {
+ .addr = ds_addr,
+ .addrlen = ds_addrlen,
+ .nfs_mod = &nfs_v3,
+ .proto = ds_proto,
+ .net = mds_clp->cl_net,
+ };
+ struct rpc_timeout ds_timeout;
+ struct nfs_client *clp;
+ char buf[INET6_ADDRSTRLEN + 1];
+
+ /* fake a hostname because lockd wants it */
+ if (rpc_ntop(ds_addr, buf, sizeof(buf)) <= 0)
+ return ERR_PTR(-EINVAL);
+ cl_init.hostname = buf;
+
+ /* Use the MDS nfs_client cl_ipaddr. */
+ nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
+ clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
+ au_flavor);
+
+ return clp;
+}
+EXPORT_SYMBOL_GPL(nfs3_set_ds_client);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 524f9f8..78e557c 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -800,6 +800,9 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
+ if (hdr->pgio_done_cb != NULL)
+ return hdr->pgio_done_cb(task, hdr);
+
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
@@ -825,6 +828,9 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
+ if (hdr->pgio_done_cb != NULL)
+ return hdr->pgio_done_cb(task, hdr);
+
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
if (task->tk_status >= 0)
@@ -845,6 +851,9 @@ static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commi
static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
+ if (data->commit_done_cb != NULL)
+ return data->commit_done_cb(task, data);
+
if (nfs3_async_handle_jukebox(task, data->inode))
return -EAGAIN;
nfs_refresh_inode(data->inode, data->res.fattr);
diff --git a/fs/nfs/nfs3super.c b/fs/nfs/nfs3super.c
index 6af29c2..5c4394e 100644
--- a/fs/nfs/nfs3super.c
+++ b/fs/nfs/nfs3super.c
@@ -7,7 +7,7 @@
#include "nfs3_fs.h"
#include "nfs.h"
-static struct nfs_subversion nfs_v3 = {
+struct nfs_subversion nfs_v3 = {
.owner = THIS_MODULE,
.nfs_fs = &nfs_fs_type,
.rpc_vers = &nfs_version3,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 8f4cbe7..2a932fd 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1636,6 +1636,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS3_OK)
goto out_status;
error = decode_read3resok(xdr, result);
@@ -1708,6 +1709,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS3_OK)
goto out_status;
error = decode_write3resok(xdr, result);
@@ -2323,6 +2325,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
+ result->op_status = status;
if (status != NFS3_OK)
goto out_status;
error = decode_writeverf3(xdr, &result->verf->verifier);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a081787..fdef424 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@ enum nfs4_client_state {
#define NFS4_RENEW_TIMEOUT 0x01
#define NFS4_RENEW_DELEGATION_CB 0x02
+struct nfs_seqid_counter;
struct nfs4_minor_version_ops {
u32 minor_version;
unsigned init_caps;
@@ -56,6 +57,8 @@ struct nfs4_minor_version_ops {
struct nfs_fsinfo *);
void (*free_lock_state)(struct nfs_server *,
struct nfs4_lock_state *);
+ struct nfs_seqid *
+ (*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
const struct rpc_call_ops *call_sync_ops;
const struct nfs4_state_recovery_ops *reboot_recovery_ops;
const struct nfs4_state_recovery_ops *nograce_recovery_ops;
@@ -443,6 +446,12 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_release_seqid(struct nfs_seqid *seqid);
extern void nfs_free_seqid(struct nfs_seqid *seqid);
+extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task);
+extern int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res);
extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 953daa4..8646af9 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
prev = pos;
status = nfs_wait_client_init_complete(pos);
- if (status == 0) {
+ if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
nfs4_schedule_lease_recovery(pos);
status = nfs4_wait_clnt_recover(pos);
}
@@ -849,14 +849,15 @@ error:
*/
struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
const struct sockaddr *ds_addr, int ds_addrlen,
- int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
+ int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
+ u32 minor_version, rpc_authflavor_t au_flavor)
{
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
.addrlen = ds_addrlen,
.nfs_mod = &nfs_v4,
.proto = ds_proto,
- .minorversion = mds_clp->cl_minorversion,
+ .minorversion = minor_version,
.net = mds_clp->cl_net,
};
struct rpc_timeout ds_timeout;
@@ -874,7 +875,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
*/
nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
- mds_clp->cl_rpcclient->cl_auth->au_flavor);
+ au_flavor);
dprintk("<-- %s %p\n", __func__, clp);
return clp;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c347705..2e7c9f7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -495,12 +495,11 @@ static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
args->sa_privileged = 1;
}
-static int nfs40_setup_sequence(const struct nfs_server *server,
- struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- struct rpc_task *task)
+int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task)
{
- struct nfs4_slot_table *tbl = server->nfs_client->cl_slot_tbl;
struct nfs4_slot *slot;
/* slot already allocated? */
@@ -535,6 +534,7 @@ out_sleep:
spin_unlock(&tbl->slot_tbl_lock);
return -EAGAIN;
}
+EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
static int nfs40_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
@@ -694,8 +694,7 @@ out_retry:
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);
-static int nfs4_sequence_done(struct rpc_task *task,
- struct nfs4_sequence_res *res)
+int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
if (res->sr_slot == NULL)
return 1;
@@ -703,6 +702,7 @@ static int nfs4_sequence_done(struct rpc_task *task,
return nfs40_sequence_done(task, res);
return nfs41_sequence_done(task, res);
}
+EXPORT_SYMBOL_GPL(nfs4_sequence_done);
int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
@@ -777,7 +777,8 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
int ret = 0;
if (!session)
- return nfs40_setup_sequence(server, args, res, task);
+ return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
+ args, res, task);
dprintk("--> %s clp %p session %p sr_slot %u\n",
__func__, session->clp, session, res->sr_slot ?
@@ -818,14 +819,16 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_res *res,
struct rpc_task *task)
{
- return nfs40_setup_sequence(server, args, res, task);
+ return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
+ args, res, task);
}
-static int nfs4_sequence_done(struct rpc_task *task,
- struct nfs4_sequence_res *res)
+int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
{
return nfs40_sequence_done(task, res);
}
+EXPORT_SYMBOL_GPL(nfs4_sequence_done);
#endif /* !CONFIG_NFS_V4_1 */
@@ -937,6 +940,31 @@ static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
return true;
}
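+/* Map the VFS fmode/open flags onto NFSv4 OPEN share_access (and want) bits */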
+static u32
+nfs4_map_atomic_open_share(struct nfs_server *server,
+ fmode_t fmode, int openflags)
+{
+ u32 res = 0;
+
+ switch (fmode & (FMODE_READ | FMODE_WRITE)) {
+ case FMODE_READ:
+ res = NFS4_SHARE_ACCESS_READ;
+ break;
+ case FMODE_WRITE:
+ res = NFS4_SHARE_ACCESS_WRITE;
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ res = NFS4_SHARE_ACCESS_BOTH;
+ }
+ if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
+ goto out;
+ /* Want no delegation if we're using O_DIRECT */
+ if (openflags & O_DIRECT)
+ res |= NFS4_SHARE_WANT_NO_DELEG;
+out:
+ return res;
+}
+
static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
enum open_claim_type4 claim)
@@ -977,6 +1005,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct dentry *parent = dget_parent(dentry);
struct inode *dir = parent->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
struct nfs4_opendata *p;
p = kzalloc(sizeof(*p), gfp_mask);
@@ -987,8 +1016,9 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
if (IS_ERR(p->f_label))
goto err_free_p;
- p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
- if (p->o_arg.seqid == NULL)
+ alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
+ p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
+ if (IS_ERR(p->o_arg.seqid))
goto err_free_label;
nfs_sb_active(dentry->d_sb);
p->dentry = dget(dentry);
@@ -997,6 +1027,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
atomic_inc(&sp->so_count);
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
+ p->o_arg.share_access = nfs4_map_atomic_open_share(server,
+ fmode, flags);
/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
* will return permission denied for all bits until close */
if (!(flags & O_EXCL)) {
@@ -1167,6 +1199,16 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
return false;
}
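+/* Re-derive the open-mode state bits from the outstanding open counts */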
+static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
+{
+ if (state->n_wronly)
+ set_bit(NFS_O_WRONLY_STATE, &state->flags);
+ if (state->n_rdonly)
+ set_bit(NFS_O_RDONLY_STATE, &state->flags);
+ if (state->n_rdwr)
+ set_bit(NFS_O_RDWR_STATE, &state->flags);
+}
+
static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
nfs4_stateid *stateid, fmode_t fmode)
{
@@ -1185,8 +1227,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
}
if (stateid == NULL)
return;
- if (!nfs_need_update_open_stateid(state, stateid))
+ /* Handle races with OPEN */
+ if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
+ !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+ nfs_resync_open_stateid_locked(state);
return;
+ }
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
nfs4_stateid_copy(&state->stateid, stateid);
nfs4_stateid_copy(&state->open_stateid, stateid);
@@ -1281,6 +1327,23 @@ no_delegation:
return ret;
}
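+/* Update the cached lock stateid only if @stateid is a newer seqid for it */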
+static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
+ const nfs4_stateid *stateid)
+{
+ struct nfs4_state *state = lsp->ls_state;
+ bool ret = false;
+
+ spin_lock(&state->state_lock);
+ if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
+ goto out_noupdate;
+ if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
+ goto out_noupdate;
+ nfs4_stateid_copy(&lsp->ls_stateid, stateid);
+ ret = true;
+out_noupdate:
+ spin_unlock(&state->state_lock);
+ return ret;
+}
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
@@ -1679,8 +1742,8 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
- nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
- &data->c_res.seq_res, task);
+ nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
+ &data->c_arg.seq_args, &data->c_res.seq_res, task);
}
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
@@ -2587,6 +2650,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_EXPIRED:
+ if (!nfs4_stateid_match(&calldata->arg.stateid,
+ &state->stateid)) {
+ rpc_restart_call_prepare(task);
+ goto out_release;
+ }
if (calldata->arg.fmode == 0)
break;
default:
@@ -2619,6 +2687,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
+ nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid);
/* Calculate the change in open mode */
calldata->arg.fmode = 0;
if (state->n_rdwr == 0) {
@@ -2653,6 +2722,9 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
goto out_wait;
}
}
+ calldata->arg.share_access =
+ nfs4_map_atomic_open_share(NFS_SERVER(inode),
+ calldata->arg.fmode, 0);
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
@@ -2675,45 +2747,10 @@ static const struct rpc_call_ops nfs4_close_ops = {
.rpc_release = nfs4_free_closedata,
};
-static bool nfs4_state_has_opener(struct nfs4_state *state)
-{
- /* first check existing openers */
- if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 &&
- state->n_rdonly != 0)
- return true;
-
- if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 &&
- state->n_wronly != 0)
- return true;
-
- if (test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 &&
- state->n_rdwr != 0)
- return true;
-
- return false;
-}
-
static bool nfs4_roc(struct inode *inode)
{
- struct nfs_inode *nfsi = NFS_I(inode);
- struct nfs_open_context *ctx;
- struct nfs4_state *state;
-
- spin_lock(&inode->i_lock);
- list_for_each_entry(ctx, &nfsi->open_files, list) {
- state = ctx->state;
- if (state == NULL)
- continue;
- if (nfs4_state_has_opener(state)) {
- spin_unlock(&inode->i_lock);
- return false;
- }
- }
- spin_unlock(&inode->i_lock);
-
- if (nfs4_check_delegation(inode, FMODE_READ))
+ if (!nfs_have_layout(inode))
return false;
-
return pnfs_roc(inode);
}
@@ -2731,6 +2768,7 @@ static bool nfs4_roc(struct inode *inode)
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
{
struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
struct nfs4_closedata *calldata;
struct nfs4_state_owner *sp = state->owner;
struct rpc_task *task;
@@ -2757,10 +2795,10 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
- calldata->arg.stateid = &state->open_stateid;
/* Serialization for the sequence id */
- calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
- if (calldata->arg.seqid == NULL)
+ alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
+ calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
+ if (IS_ERR(calldata->arg.seqid))
goto out_free_calldata;
calldata->arg.fmode = 0;
calldata->arg.bitmask = server->cache_consistency_bitmask;
@@ -5137,9 +5175,13 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
static void nfs4_delegreturn_release(void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
+ struct inode *inode = data->inode;
- if (data->roc)
- pnfs_roc_release(data->inode);
+ if (inode) {
+ if (data->roc)
+ pnfs_roc_release(inode);
+ nfs_iput_and_deactive(inode);
+ }
kfree(calldata);
}
@@ -5196,9 +5238,9 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
- data->inode = inode;
- data->roc = list_empty(&NFS_I(inode)->open_files) ?
- pnfs_roc(inode) : false;
+ data->inode = nfs_igrab_and_active(inode);
+ if (data->inode)
+ data->roc = nfs4_roc(inode);
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
@@ -5353,7 +5395,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
- p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
@@ -5380,14 +5421,18 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
return;
switch (task->tk_status) {
case 0:
- nfs4_stateid_copy(&calldata->lsp->ls_stateid,
- &calldata->res.stateid);
renew_lease(calldata->server, calldata->timestamp);
- break;
+ do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
+ if (nfs4_update_lock_stateid(calldata->lsp,
+ &calldata->res.stateid))
+ break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
+ if (!nfs4_stateid_match(&calldata->arg.stateid,
+ &calldata->lsp->ls_stateid))
+ rpc_restart_call_prepare(task);
break;
default:
if (nfs4_async_handle_error(task, calldata->server,
@@ -5403,6 +5448,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
goto out_wait;
+ nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
goto out_no_action;
@@ -5473,6 +5519,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
struct nfs_seqid *seqid;
struct nfs4_lock_state *lsp;
struct rpc_task *task;
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
int status = 0;
unsigned char fl_flags = request->fl_flags;
@@ -5496,9 +5543,10 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
lsp = request->fl_u.nfs4_fl.owner;
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
goto out;
- seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
+ alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
+ seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
- if (seqid == NULL)
+ if (IS_ERR(seqid))
goto out;
task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
status = PTR_ERR(task);
@@ -5531,6 +5579,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
@@ -5539,12 +5588,12 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
- if (p->arg.open_seqid == NULL)
+ if (IS_ERR(p->arg.open_seqid))
goto out_free;
- p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
- if (p->arg.lock_seqid == NULL)
+ alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
+ p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
+ if (IS_ERR(p->arg.lock_seqid))
goto out_free_seqid;
- p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
p->arg.lock_owner.s_dev = server->s_dev;
@@ -5571,15 +5620,19 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
goto out_wait;
/* Do we need to do an open_to_lock_owner? */
- if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
+ if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
goto out_release_lock_seqid;
}
- data->arg.open_stateid = &state->open_stateid;
+ nfs4_stateid_copy(&data->arg.open_stateid,
+ &state->open_stateid);
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
- } else
+ } else {
data->arg.new_lock_owner = 0;
+ nfs4_stateid_copy(&data->arg.lock_stateid,
+ &data->lsp->ls_stateid);
+ }
if (!nfs4_valid_open_stateid(state)) {
data->rpc_status = -EBADF;
task->tk_action = NULL;
@@ -5603,6 +5656,7 @@ out_wait:
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
+ struct nfs4_lock_state *lsp = data->lsp;
dprintk("%s: begin!\n", __func__);
@@ -5610,18 +5664,36 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
return;
data->rpc_status = task->tk_status;
- if (data->arg.new_lock_owner != 0) {
- if (data->rpc_status == 0)
- nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
- else
- goto out;
- }
- if (data->rpc_status == 0) {
- nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
- set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
- renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
+ switch (task->tk_status) {
+ case 0:
+ renew_lease(NFS_SERVER(data->ctx->dentry->d_inode),
+ data->timestamp);
+ if (data->arg.new_lock) {
+ data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+ if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
+ rpc_restart_call_prepare(task);
+ break;
+ }
+ }
+ if (data->arg.new_lock_owner != 0) {
+ nfs_confirm_seqid(&lsp->ls_seqid, 0);
+ nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
+ set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
+ } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+ rpc_restart_call_prepare(task);
+ break;
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ if (data->arg.new_lock_owner != 0) {
+ if (!nfs4_stateid_match(&data->arg.open_stateid,
+ &lsp->ls_state->open_stateid))
+ rpc_restart_call_prepare(task);
+ } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
+ &lsp->ls_stateid))
+ rpc_restart_call_prepare(task);
}
-out:
dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
@@ -5702,7 +5774,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
if (recovery_type == NFS_LOCK_RECLAIM)
data->arg.reclaim = NFS_LOCK_RECLAIM;
nfs4_set_sequence_privileged(&data->arg.seq_args);
- }
+ } else
+ data->arg.new_lock = 1;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -5826,10 +5899,8 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
- struct nfs4_state_owner *sp = state->owner;
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
- unsigned int seq;
int status = -ENOLCK;
if ((fl_flags & FL_POSIX) &&
@@ -5849,25 +5920,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
status = do_vfs_lock(request->fl_file, request);
- goto out_unlock;
- }
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
- up_read(&nfsi->rwsem);
- status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
- if (status != 0)
+ up_read(&nfsi->rwsem);
goto out;
- down_read(&nfsi->rwsem);
- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) {
- status = -NFS4ERR_DELAY;
- goto out_unlock;
}
- /* Note: we always want to sleep here! */
- request->fl_flags = fl_flags | FL_SLEEP;
- if (do_vfs_lock(request->fl_file, request) < 0)
- printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
- "manager!\n", __func__);
-out_unlock:
up_read(&nfsi->rwsem);
+ status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
out:
request->fl_flags = fl_flags;
return status;
@@ -5974,8 +6031,8 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata
{
struct nfs_release_lockowner_data *data = calldata;
struct nfs_server *server = data->server;
- nfs40_setup_sequence(server, &data->args.seq_args,
- &data->res.seq_res, task);
+ nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
+ &data->args.seq_args, &data->res.seq_res, task);
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
data->timestamp = jiffies;
}
@@ -7537,6 +7594,7 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
return;
if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
NFS_I(lgp->args.inode)->layout,
+ &lgp->args.range,
lgp->args.ctx->state)) {
rpc_exit(task, NFS4_OK);
}
@@ -7792,9 +7850,13 @@ static void nfs4_layoutreturn_release(void *calldata)
spin_lock(&lo->plh_inode->i_lock);
if (lrp->res.lrs_present)
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ pnfs_clear_layoutreturn_waitbit(lo);
+ clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
+ rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
pnfs_put_layout_hdr(lrp->args.layout);
+ nfs_iput_and_deactive(lrp->inode);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
@@ -7805,7 +7867,7 @@ static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
.rpc_release = nfs4_layoutreturn_release,
};
-int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
+int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
{
struct rpc_task *task;
struct rpc_message msg = {
@@ -7820,14 +7882,23 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
.callback_ops = &nfs4_layoutreturn_call_ops,
.callback_data = lrp,
};
- int status;
+ int status = 0;
dprintk("--> %s\n", __func__);
+ if (!sync) {
+ lrp->inode = nfs_igrab_and_active(lrp->args.inode);
+ if (!lrp->inode) {
+ nfs4_layoutreturn_release(lrp);
+ return -EAGAIN;
+ }
+ task_setup_data.flags |= RPC_TASK_ASYNC;
+ }
nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
- status = task->tk_status;
+ if (sync)
+ status = task->tk_status;
trace_nfs4_layoutreturn(lrp->args.inode, status);
dprintk("<-- %s status=%d\n", __func__, status);
rpc_put_task(task);
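
The sync/async split above recurs throughout this series (see the matching change to nfs4_proc_layoutcommit() in the next hunks): for a fire-and-forget call the inode must be pinned with nfs_igrab_and_active() so the rpc_release callback can still dereference it, and task->tk_status is only meaningful when rpc_run_task() actually waited. A condensed sketch of the pattern, with error paths trimmed:

	/* Sketch of the sync/async dispatch used above; not the real
	 * function body. rpc_run_task() only returns after completion
	 * when RPC_TASK_ASYNC is clear. */
	static int run_rpc_maybe_async(struct nfs4_layoutreturn *lrp,
				       struct rpc_task_setup *setup, bool sync)
	{
		struct rpc_task *task;
		int status = 0;

		if (!sync) {
			/* pin the inode; nfs4_layoutreturn_release()
			 * drops it via nfs_iput_and_deactive() */
			lrp->inode = nfs_igrab_and_active(lrp->args.inode);
			if (!lrp->inode)
				return -EAGAIN;	/* inode going away; the
						   real code releases lrp
						   here first */
			setup->flags |= RPC_TASK_ASYNC;
		}
		task = rpc_run_task(setup);
		if (IS_ERR(task))
			return PTR_ERR(task);
		if (sync)
			status = task->tk_status;
		rpc_put_task(task);	/* drop our reference only */
		return status;
	}
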
@@ -7921,6 +7992,7 @@ static void nfs4_layoutcommit_release(void *calldata)
nfs_post_op_update_inode_force_wcc(data->args.inode,
data->res.fattr);
put_rpccred(data->cred);
+ nfs_iput_and_deactive(data->inode);
kfree(data);
}
@@ -7945,7 +8017,6 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
.rpc_message = &msg,
.callback_ops = &nfs4_layoutcommit_ops,
.callback_data = data,
- .flags = RPC_TASK_ASYNC,
};
struct rpc_task *task;
int status = 0;
@@ -7956,18 +8027,21 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
data->args.lastbytewritten,
data->args.inode->i_ino);
+ if (!sync) {
+ data->inode = nfs_igrab_and_active(data->args.inode);
+ if (data->inode == NULL) {
+ nfs4_layoutcommit_release(data);
+ return -EAGAIN;
+ }
+ task_setup_data.flags = RPC_TASK_ASYNC;
+ }
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
- if (sync == false)
- goto out;
- status = nfs4_wait_for_completion_rpc_task(task);
- if (status != 0)
- goto out;
- status = task->tk_status;
+ if (sync)
+ status = task->tk_status;
trace_nfs4_layoutcommit(data->args.inode, status);
-out:
dprintk("%s: status %d\n", __func__, status);
rpc_put_task(task);
return status;
@@ -8395,6 +8469,7 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.match_stateid = nfs4_match_stateid,
.find_root_sec = nfs4_find_root_sec,
.free_lock_state = nfs4_release_lockowner,
+ .alloc_seqid = nfs_alloc_seqid,
.call_sync_ops = &nfs40_call_sync_ops,
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
@@ -8403,6 +8478,12 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
};
#if defined(CONFIG_NFS_V4_1)
+static struct nfs_seqid *
+nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
+{
+ return NULL;
+}
+
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.minor_version = 1,
.init_caps = NFS_CAP_READDIRPLUS
@@ -8416,6 +8497,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.match_stateid = nfs41_match_stateid,
.find_root_sec = nfs41_find_root_sec,
.free_lock_state = nfs41_free_lock_state,
+ .alloc_seqid = nfs_alloc_no_seqid,
.call_sync_ops = &nfs41_call_sync_ops,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
@@ -8442,6 +8524,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
.find_root_sec = nfs41_find_root_sec,
.free_lock_state = nfs41_free_lock_state,
.call_sync_ops = &nfs41_call_sync_ops,
+ .alloc_seqid = nfs_alloc_no_seqid,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
.state_renewal_ops = &nfs41_state_renewal_ops,
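
The new .alloc_seqid op completes the seqid story for sessions: NFSv4.0 must allocate and serialize real seqids, while under v4.1+ sessions already order requests, so nfs_alloc_no_seqid() simply hands back NULL and the NULL-tolerant helpers in nfs4state.c (next file) turn every seqid operation into a no-op. A hedged sketch of a call site, assuming the usual cl_mvops indirection:

	/* Sketch only; the locals are illustrative. On v4.0 a failed
	 * allocation is ERR_PTR(-ENOMEM); on v4.1+ NULL is the normal,
	 * expected result. */
	struct nfs_seqid *seqid;

	seqid = server->nfs_client->cl_mvops->alloc_seqid(&sp->so_seqid,
							  GFP_KERNEL);
	if (IS_ERR(seqid))
		return PTR_ERR(seqid);
	nfs_wait_on_sequence(seqid, task);	/* no-op when seqid == NULL */
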
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5194933..5ad908e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1003,11 +1003,11 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_m
struct nfs_seqid *new;
new = kmalloc(sizeof(*new), gfp_mask);
- if (new != NULL) {
- new->sequence = counter;
- INIT_LIST_HEAD(&new->list);
- new->task = NULL;
- }
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ new->sequence = counter;
+ INIT_LIST_HEAD(&new->list);
+ new->task = NULL;
return new;
}
@@ -1015,7 +1015,7 @@ void nfs_release_seqid(struct nfs_seqid *seqid)
{
struct nfs_seqid_counter *sequence;
- if (list_empty(&seqid->list))
+ if (seqid == NULL || list_empty(&seqid->list))
return;
sequence = seqid->sequence;
spin_lock(&sequence->lock);
@@ -1071,13 +1071,15 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
- struct nfs4_state_owner *sp = container_of(seqid->sequence,
- struct nfs4_state_owner, so_seqid);
- struct nfs_server *server = sp->so_server;
+ struct nfs4_state_owner *sp;
+
+ if (seqid == NULL)
+ return;
+ sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
if (status == -NFS4ERR_BAD_SEQID)
nfs4_drop_state_owner(sp);
- if (!nfs4_has_session(server->nfs_client))
+ if (!nfs4_has_session(sp->so_server->nfs_client))
nfs_increment_seqid(status, seqid);
}
@@ -1088,14 +1090,18 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
*/
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
- nfs_increment_seqid(status, seqid);
+ if (seqid != NULL)
+ nfs_increment_seqid(status, seqid);
}
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
- struct nfs_seqid_counter *sequence = seqid->sequence;
+ struct nfs_seqid_counter *sequence;
int status = 0;
+ if (seqid == NULL)
+ goto out;
+ sequence = seqid->sequence;
spin_lock(&sequence->lock);
seqid->task = task;
if (list_empty(&seqid->list))
@@ -1106,6 +1112,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
status = -EAGAIN;
unlock:
spin_unlock(&sequence->lock);
+out:
return status;
}
@@ -1366,49 +1373,55 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
struct nfs_inode *nfsi = NFS_I(inode);
struct file_lock *fl;
int status = 0;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct list_head *list;
- if (inode->i_flock == NULL)
+ if (flctx == NULL)
return 0;
+ list = &flctx->flc_posix;
+
/* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
- /* Protect inode->i_flock using the BKL */
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
- continue;
+ spin_lock(&flctx->flc_lock);
+restart:
+ list_for_each_entry(fl, list, fl_list) {
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
status = ops->recover_lock(state, fl);
switch (status) {
- case 0:
- break;
- case -ESTALE:
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_NO_GRACE:
- case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_BADSLOT:
- case -NFS4ERR_BAD_HIGH_SLOT:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
- goto out;
- default:
- printk(KERN_ERR "NFS: %s: unhandled error %d\n",
- __func__, status);
- case -ENOMEM:
- case -NFS4ERR_DENIED:
- case -NFS4ERR_RECLAIM_BAD:
- case -NFS4ERR_RECLAIM_CONFLICT:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
- status = 0;
+ case 0:
+ break;
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ goto out;
+ default:
+ pr_err("NFS: %s: unhandled error %d\n",
+ __func__, status);
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ case -NFS4ERR_RECLAIM_BAD:
+ case -NFS4ERR_RECLAIM_CONFLICT:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ status = 0;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
+ if (list == &flctx->flc_posix) {
+ list = &flctx->flc_flock;
+ goto restart;
+ }
+ spin_unlock(&flctx->flc_lock);
out:
up_write(&nfsi->rwsem);
return status;
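
The conversion above tracks the VFS switch from the single inode->i_flock chain to a per-inode struct file_lock_context with separate flc_posix and flc_flock lists. The walk visits flc_posix first and jumps back through the restart label for flc_flock, dropping flc_lock around each recovery call; that is only safe because nfsi->rwsem is held for write, so nothing can mutate the lists meanwhile. The two-list idiom in isolation (handle_one_lock() stands in for ops->recover_lock()):

	struct file_lock_context *flctx = inode->i_flctx;
	struct list_head *list = &flctx->flc_posix;
	struct file_lock *fl;

	spin_lock(&flctx->flc_lock);
restart:
	list_for_each_entry(fl, list, fl_list) {
		spin_unlock(&flctx->flc_lock);	/* handler may sleep */
		handle_one_lock(fl);		/* lists pinned by rwsem */
		spin_lock(&flctx->flc_lock);
	}
	if (list == &flctx->flc_posix) {
		list = &flctx->flc_flock;	/* second pass */
		goto restart;
	}
	spin_unlock(&flctx->flc_lock);
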
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index ab30a3a..75090fe 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -345,6 +345,9 @@ out:
static void __exit exit_nfs_v4(void)
{
+	/* No matching call in init_nfs_v4(): the pNFS v3 DS connect code is loaded on demand, so unload it here */
+ nfs4_pnfs_v3_ds_connect_unload();
+
unregister_nfs_version(&nfs_v4);
nfs4_unregister_sysctl();
nfs_idmap_quit();
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index cb4376b..e23a0a6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -946,7 +946,10 @@ static void encode_uint64(struct xdr_stream *xdr, u64 n)
static void encode_nfs4_seqid(struct xdr_stream *xdr,
const struct nfs_seqid *seqid)
{
- encode_uint32(xdr, seqid->sequence->counter);
+ if (seqid != NULL)
+ encode_uint32(xdr, seqid->sequence->counter);
+ else
+ encode_uint32(xdr, 0);
}
static void encode_compound_hdr(struct xdr_stream *xdr,
@@ -1125,7 +1128,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
{
encode_op_hdr(xdr, OP_CLOSE, decode_close_maxsz, hdr);
encode_nfs4_seqid(xdr, arg->seqid);
- encode_nfs4_stateid(xdr, arg->stateid);
+ encode_nfs4_stateid(xdr, &arg->stateid);
}
static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr)
@@ -1301,12 +1304,12 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
*p = cpu_to_be32(args->new_lock_owner);
if (args->new_lock_owner){
encode_nfs4_seqid(xdr, args->open_seqid);
- encode_nfs4_stateid(xdr, args->open_stateid);
+ encode_nfs4_stateid(xdr, &args->open_stateid);
encode_nfs4_seqid(xdr, args->lock_seqid);
encode_lockowner(xdr, &args->lock_owner);
}
else {
- encode_nfs4_stateid(xdr, args->lock_stateid);
+ encode_nfs4_stateid(xdr, &args->lock_stateid);
encode_nfs4_seqid(xdr, args->lock_seqid);
}
}
@@ -1330,7 +1333,7 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
encode_op_hdr(xdr, OP_LOCKU, decode_locku_maxsz, hdr);
encode_uint32(xdr, nfs4_lock_type(args->fl, 0));
encode_nfs4_seqid(xdr, args->seqid);
- encode_nfs4_stateid(xdr, args->stateid);
+ encode_nfs4_stateid(xdr, &args->stateid);
p = reserve_space(xdr, 16);
p = xdr_encode_hyper(p, args->fl->fl_start);
xdr_encode_hyper(p, nfs4_lock_length(args->fl));
@@ -1348,24 +1351,12 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc
encode_string(xdr, name->len, name->name);
}
-static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
+static void encode_share_access(struct xdr_stream *xdr, u32 share_access)
{
__be32 *p;
p = reserve_space(xdr, 8);
- switch (fmode & (FMODE_READ|FMODE_WRITE)) {
- case FMODE_READ:
- *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ);
- break;
- case FMODE_WRITE:
- *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE);
- break;
- case FMODE_READ|FMODE_WRITE:
- *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH);
- break;
- default:
- *p++ = cpu_to_be32(0);
- }
+ *p++ = cpu_to_be32(share_access);
*p = cpu_to_be32(0); /* for linux, share_deny = 0 always */
}
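
encode_share_access() now takes the on-the-wire share_access word instead of deriving it from an fmode_t, which pushes the mapping policy up to the callers (and lets them OR in NFS4_SHARE_WANT_* hints later). The mapping the callers now own is exactly the switch deleted above; as a standalone helper it would look like this (helper name invented for illustration):

	static u32 fmode_to_share_access(fmode_t fmode)
	{
		switch (fmode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			return NFS4_SHARE_ACCESS_READ;
		case FMODE_WRITE:
			return NFS4_SHARE_ACCESS_WRITE;
		case FMODE_READ | FMODE_WRITE:
			return NFS4_SHARE_ACCESS_BOTH;
		}
		return 0;
	}
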
@@ -1377,7 +1368,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
* owner 4 = 32
*/
encode_nfs4_seqid(xdr, arg->seqid);
- encode_share_access(xdr, arg->fmode);
+ encode_share_access(xdr, arg->share_access);
p = reserve_space(xdr, 36);
p = xdr_encode_hyper(p, arg->clientid);
*p++ = cpu_to_be32(24);
@@ -1530,9 +1521,9 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co
static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_OPEN_DOWNGRADE, decode_open_downgrade_maxsz, hdr);
- encode_nfs4_stateid(xdr, arg->stateid);
+ encode_nfs4_stateid(xdr, &arg->stateid);
encode_nfs4_seqid(xdr, arg->seqid);
- encode_share_access(xdr, arg->fmode);
+ encode_share_access(xdr, arg->share_access);
}
static void
@@ -1801,9 +1792,8 @@ static void encode_create_session(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
__be32 *p;
- char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
- uint32_t len;
struct nfs_client *clp = args->client;
+ struct rpc_clnt *clnt = clp->cl_rpcclient;
struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
u32 max_resp_sz_cached;
@@ -1814,11 +1804,8 @@ static void encode_create_session(struct xdr_stream *xdr,
max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
- len = scnprintf(machine_name, sizeof(machine_name), "%s",
- clp->cl_ipaddr);
-
encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
- p = reserve_space(xdr, 16 + 2*28 + 20 + len + 12);
+ p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
p = xdr_encode_hyper(p, clp->cl_clientid);
*p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
*p++ = cpu_to_be32(args->flags); /*flags */
@@ -1847,7 +1834,7 @@ static void encode_create_session(struct xdr_stream *xdr,
/* authsys_parms rfc1831 */
*p++ = cpu_to_be32(nn->boot_time.tv_nsec); /* stamp */
- p = xdr_encode_opaque(p, machine_name, len);
+ p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
*p++ = cpu_to_be32(0); /* UID */
*p++ = cpu_to_be32(0); /* GID */
*p = cpu_to_be32(0); /* No more gids */
@@ -2012,11 +1999,11 @@ encode_layoutreturn(struct xdr_stream *xdr,
p = reserve_space(xdr, 16);
*p++ = cpu_to_be32(0); /* reclaim. always 0 for now */
*p++ = cpu_to_be32(args->layout_type);
- *p++ = cpu_to_be32(IOMODE_ANY);
+ *p++ = cpu_to_be32(args->range.iomode);
*p = cpu_to_be32(RETURN_FILE);
p = reserve_space(xdr, 16);
- p = xdr_encode_hyper(p, 0);
- p = xdr_encode_hyper(p, NFS4_MAX_UINT64);
+ p = xdr_encode_hyper(p, args->range.offset);
+ p = xdr_encode_hyper(p, args->range.length);
spin_lock(&args->inode->i_lock);
encode_nfs4_stateid(xdr, &args->stateid);
spin_unlock(&args->inode->i_lock);
@@ -4936,20 +4923,13 @@ out_overflow:
return -EIO;
}
-static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+static int decode_rw_delegation(struct xdr_stream *xdr,
+ uint32_t delegation_type,
+ struct nfs_openres *res)
{
__be32 *p;
- uint32_t delegation_type;
int status;
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- goto out_overflow;
- delegation_type = be32_to_cpup(p);
- if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
- res->delegation_type = 0;
- return 0;
- }
status = decode_stateid(xdr, &res->delegation);
if (unlikely(status))
return status;
@@ -4973,6 +4953,52 @@ out_overflow:
return -EIO;
}
+static int decode_no_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+ __be32 *p;
+ uint32_t why_no_delegation;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ why_no_delegation = be32_to_cpup(p);
+ switch (why_no_delegation) {
+ case WND4_CONTENTION:
+ case WND4_RESOURCE:
+ xdr_inline_decode(xdr, 4);
+ /* Ignore for now */
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+ __be32 *p;
+ uint32_t delegation_type;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ delegation_type = be32_to_cpup(p);
+ res->delegation_type = 0;
+ switch (delegation_type) {
+ case NFS4_OPEN_DELEGATE_NONE:
+ return 0;
+ case NFS4_OPEN_DELEGATE_READ:
+ case NFS4_OPEN_DELEGATE_WRITE:
+ return decode_rw_delegation(xdr, delegation_type, res);
+ case NFS4_OPEN_DELEGATE_NONE_EXT:
+ return decode_no_delegation(xdr, res);
+ }
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
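
decode_delegation() is split so the OPEN reply dispatch mirrors RFC 5661: NONE returns immediately, READ/WRITE share the old stateid path, and the v4.1 NONE_EXT arm explains why no delegation was granted. For WND4_CONTENTION and WND4_RESOURCE the reply carries one extra boolean (whether the server will signal availability via a callback), which decode_no_delegation() reads and discards. If a later patch wanted to keep it, the arm might look like this (the res field is hypothetical):

	case WND4_CONTENTION:
	case WND4_RESOURCE:
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(!p))
			goto out_overflow;
		/* hypothetical: remember the server's promise to send
		 * CB_PUSH_DELEG / CB_RECALLABLE_OBJ_AVAIL */
		res->will_signal_avail = be32_to_cpup(p) != 0;
		break;
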
static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
{
__be32 *p;
@@ -6567,6 +6593,7 @@ static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
status = decode_compound_hdr(xdr, &hdr);
+ res->op_status = hdr.status;
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
@@ -6592,6 +6619,7 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
status = decode_compound_hdr(xdr, &hdr);
+ res->op_status = hdr.status;
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
@@ -6621,6 +6649,7 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
status = decode_compound_hdr(xdr, &hdr);
+ res->op_status = hdr.status;
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index cd3c910..9bc9f04 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -261,11 +261,11 @@ static int __init root_nfs_data(char *cmdline)
*/
len = snprintf(nfs_export_path, sizeof(nfs_export_path),
tmp, utsname()->nodename);
- if (len > (int)sizeof(nfs_export_path))
+ if (len >= (int)sizeof(nfs_export_path))
goto out_devnametoolong;
len = snprintf(nfs_root_device, sizeof(nfs_root_device),
"%pI4:%s", &servaddr, nfs_export_path);
- if (len > (int)sizeof(nfs_root_device))
+ if (len >= (int)sizeof(nfs_root_device))
goto out_devnametoolong;
retval = 0;
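
Both checks above fix the classic snprintf() off-by-one: snprintf() returns the length the output would have had, and a return value equal to the buffer size already means the string was truncated to make room for the NUL terminator, so truncation must be detected with >=, not >. A compilable illustration:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		int len = snprintf(buf, sizeof(buf), "%s", "12345678");

		/* len == 8 == sizeof(buf): buf holds "1234567", truncated,
		 * yet the old "len > sizeof(buf)" test saw nothing wrong. */
		printf("len=%d buf=\"%s\"\n", len, buf);
		return 0;
	}
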
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9e5bc42..24e1d74 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -537,11 +537,12 @@ int objio_write_pagelist(struct nfs_pgio_header *hdr, int how)
static size_t objio_pg_test(struct nfs_pageio_descriptor *pgio,
struct nfs_page *prev, struct nfs_page *req)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(pgio);
unsigned int size;
size = pnfs_generic_pg_test(pgio, prev, req);
- if (!size || pgio->pg_count + req->wb_bytes >
+ if (!size || mirror->pg_count + req->wb_bytes >
(unsigned long)pgio->pg_layout_private)
return 0;
@@ -607,12 +608,14 @@ static const struct nfs_pageio_ops objio_pg_read_ops = {
.pg_init = objio_init_read,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops objio_pg_write_ops = {
.pg_init = objio_init_write,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
+ .pg_cleanup = pnfs_generic_pg_cleanup,
};
static struct pnfs_layoutdriver_type objlayout_type = {
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 2b5e769..d57190a 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -42,21 +42,35 @@ static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
return p->pagevec != NULL;
}
+struct nfs_pgio_mirror *
+nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
+{
+ return nfs_pgio_has_mirroring(desc) ?
+ &desc->pg_mirrors[desc->pg_mirror_idx] :
+ &desc->pg_mirrors[0];
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
+
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr,
void (*release)(struct nfs_pgio_header *hdr))
{
- hdr->req = nfs_list_entry(desc->pg_list.next);
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
+ hdr->req = nfs_list_entry(mirror->pg_list.next);
hdr->inode = desc->pg_inode;
hdr->cred = hdr->req->wb_context->cred;
hdr->io_start = req_offset(hdr->req);
- hdr->good_bytes = desc->pg_count;
+ hdr->good_bytes = mirror->pg_count;
hdr->dreq = desc->pg_dreq;
hdr->layout_private = desc->pg_layout_private;
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
hdr->completion_ops->init_hdr(hdr);
+
+ hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);
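
nfs_pgio_current_mirror() is the pivot of the mirroring series: pg_list, pg_count, pg_base, pg_bsize, pg_bytes_written and pg_recoalesce all move from the descriptor into a per-mirror struct indexed by pg_mirror_idx, and non-mirroring descriptors always resolve to mirror 0. That is why most of the hunks below are mechanical; for example:

	/* before: state lived on the descriptor itself */
	hdr->good_bytes = desc->pg_count;

	/* after: same state, read from the currently selected mirror */
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->good_bytes = mirror->pg_count;
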
@@ -480,7 +494,10 @@ nfs_wait_on_request(struct nfs_page *req)
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
struct nfs_page *prev, struct nfs_page *req)
{
- if (desc->pg_count > desc->pg_bsize) {
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
+ if (mirror->pg_count > mirror->pg_bsize) {
/* should never happen */
WARN_ON_ONCE(1);
return 0;
@@ -490,11 +507,11 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
* Limit the request size so that we can still allocate a page array
* for it without upsetting the slab allocator.
*/
- if (((desc->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
+ if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
sizeof(struct page) > PAGE_SIZE)
return 0;
- return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
+ return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
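
The size cap above keeps the per-request page array allocatable from a single slab page. With 4 KiB pages and a 64-byte struct page (typical values, but configuration-dependent, so treat this purely as a worked example):

	/* ((pg_count + wb_bytes) >> PAGE_SHIFT) * sizeof(struct page)
	 * must not exceed PAGE_SIZE, hence: */
	max_pages = 4096 / 64;			/* = 64 pages          */
	max_request = max_pages * 4096;		/* = 262144 = 256 KiB  */

so a single nfs_pgio_header never references more than about 256 KiB of payload on such a configuration.
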
@@ -597,13 +614,14 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
}
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+ struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
const struct rpc_call_ops *call_ops, int how, int flags)
{
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &hdr->args,
.rpc_resp = &hdr->res,
- .rpc_cred = hdr->cred,
+ .rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
@@ -616,7 +634,7 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
};
int ret = 0;
- hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);
+ hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
dprintk("NFS: %5u initiated pgio call "
"(req %s/%llu, %u bytes @ offset %llu)\n",
@@ -650,10 +668,18 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror;
+ u32 midx;
+
set_bit(NFS_IOHDR_REDO, &hdr->flags);
nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ /* TODO: Make sure it's right to clean up all mirrors here
+ * and not just hdr->pgio_mirror_idx */
+ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+ mirror = &desc->pg_mirrors[midx];
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+ }
return -ENOMEM;
}
@@ -670,6 +696,17 @@ static void nfs_pgio_release(void *calldata)
hdr->completion_ops->completion(hdr);
}
+static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
+ unsigned int bsize)
+{
+ INIT_LIST_HEAD(&mirror->pg_list);
+ mirror->pg_bytes_written = 0;
+ mirror->pg_count = 0;
+ mirror->pg_bsize = bsize;
+ mirror->pg_base = 0;
+ mirror->pg_recoalesce = 0;
+}
+
/**
* nfs_pageio_init - initialise a page io descriptor
* @desc: pointer to descriptor
@@ -686,13 +723,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
size_t bsize,
int io_flags)
{
- INIT_LIST_HEAD(&desc->pg_list);
- desc->pg_bytes_written = 0;
- desc->pg_count = 0;
- desc->pg_bsize = bsize;
- desc->pg_base = 0;
+ struct nfs_pgio_mirror *new;
+ int i;
+
desc->pg_moreio = 0;
- desc->pg_recoalesce = 0;
desc->pg_inode = inode;
desc->pg_ops = pg_ops;
desc->pg_completion_ops = compl_ops;
@@ -702,6 +736,26 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_lseg = NULL;
desc->pg_dreq = NULL;
desc->pg_layout_private = NULL;
+ desc->pg_bsize = bsize;
+
+ desc->pg_mirror_count = 1;
+ desc->pg_mirror_idx = 0;
+
+ if (pg_ops->pg_get_mirror_count) {
+		/* until we have a request, we don't have an lseg and hence
+		 * no idea how many mirrors there will be */
+ new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
+ sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
+ desc->pg_mirrors_dynamic = new;
+ desc->pg_mirrors = new;
+
+ for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
+ nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
+ } else {
+ desc->pg_mirrors_dynamic = NULL;
+ desc->pg_mirrors = desc->pg_mirrors_static;
+ nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+ }
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);
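
nfs_pageio_init() now chooses between two mirror arrays: drivers that implement pg_get_mirror_count get a kcalloc'd array sized for the maximum, because the real mirror count is unknown until the first request (and its lseg) arrives, while everyone else reuses an embedded single-entry array at zero allocation cost. The descriptor fields this implies, as assumed from the diff:

	struct nfs_pageio_descriptor {
		/* ... */
		struct nfs_pgio_mirror	*pg_mirrors;	      /* active array */
		struct nfs_pgio_mirror	pg_mirrors_static[1]; /* common case  */
		struct nfs_pgio_mirror	*pg_mirrors_dynamic;  /* or NULL      */
		u32			pg_mirror_count;
		u32			pg_mirror_idx;
	};

Note that the kcalloc() can fail and the failure is not checked in this hunk; nfs_pageio_setup_mirroring() below guards against a NULL pg_mirrors_dynamic with WARN_ON_ONCE() instead.
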
@@ -737,14 +791,16 @@ static void nfs_pgio_result(struct rpc_task *task, void *calldata)
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_page *req;
struct page **pages,
*last_page;
- struct list_head *head = &desc->pg_list;
+ struct list_head *head = &mirror->pg_list;
struct nfs_commit_info cinfo;
unsigned int pagecount, pageused;
- pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
+ pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
if (!nfs_pgarray_set(&hdr->page_array, pagecount))
return nfs_pgio_error(desc, hdr);
@@ -772,7 +828,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
- nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+ nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
@@ -780,23 +836,74 @@ EXPORT_SYMBOL_GPL(nfs_generic_pgio);
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror;
struct nfs_pgio_header *hdr;
int ret;
+ mirror = nfs_pgio_current_mirror(desc);
+
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
if (!hdr) {
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ /* TODO: make sure this is right with mirroring - or
+ * should it back out all mirrors? */
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
return -ENOMEM;
}
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0)
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
- hdr, desc->pg_rpc_callops,
+ hdr,
+ hdr->cred,
+ NFS_PROTO(hdr->inode),
+ desc->pg_rpc_callops,
desc->pg_ioflags, 0);
return ret;
}
+/*
+ * nfs_pageio_setup_mirroring - determine if mirroring is to be used
+ * by calling the pg_get_mirror_count op
+ */
+static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ int mirror_count = 1;
+
+ if (!pgio->pg_ops->pg_get_mirror_count)
+ return 0;
+
+ mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+
+ if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
+ return -EINVAL;
+
+ pgio->pg_mirror_count = mirror_count;
+
+ return 0;
+}
+
+/*
+ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
+ */
+void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
+{
+ pgio->pg_mirror_count = 1;
+ pgio->pg_mirror_idx = 0;
+}
+
+static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
+{
+ pgio->pg_mirror_count = 1;
+ pgio->pg_mirror_idx = 0;
+ pgio->pg_mirrors = pgio->pg_mirrors_static;
+ kfree(pgio->pg_mirrors_dynamic);
+ pgio->pg_mirrors_dynamic = NULL;
+}
+
static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
const struct nfs_open_context *ctx2)
{
@@ -826,11 +933,15 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
struct nfs_pageio_descriptor *pgio)
{
size_t size;
+ struct file_lock_context *flctx;
if (prev) {
if (!nfs_match_open_context(req->wb_context, prev->wb_context))
return false;
- if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+ flctx = req->wb_context->dentry->d_inode->i_flctx;
+ if (flctx != NULL &&
+ !(list_empty_careful(&flctx->flc_posix) &&
+ list_empty_careful(&flctx->flc_flock)) &&
!nfs_match_lock_context(req->wb_lock_context,
prev->wb_lock_context))
return false;
@@ -863,19 +974,22 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_page *prev = NULL;
- if (desc->pg_count != 0) {
- prev = nfs_list_entry(desc->pg_list.prev);
+
+ if (mirror->pg_count != 0) {
+ prev = nfs_list_entry(mirror->pg_list.prev);
} else {
if (desc->pg_ops->pg_init)
desc->pg_ops->pg_init(desc, req);
- desc->pg_base = req->wb_pgbase;
+ mirror->pg_base = req->wb_pgbase;
}
if (!nfs_can_coalesce_requests(prev, req, desc))
return 0;
nfs_list_remove_request(req);
- nfs_list_add_request(req, &desc->pg_list);
- desc->pg_count += req->wb_bytes;
+ nfs_list_add_request(req, &mirror->pg_list);
+ mirror->pg_count += req->wb_bytes;
return 1;
}
@@ -884,16 +998,19 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
*/
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
- if (!list_empty(&desc->pg_list)) {
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
+ if (!list_empty(&mirror->pg_list)) {
int error = desc->pg_ops->pg_doio(desc);
if (error < 0)
desc->pg_error = error;
else
- desc->pg_bytes_written += desc->pg_count;
+ mirror->pg_bytes_written += mirror->pg_count;
}
- if (list_empty(&desc->pg_list)) {
- desc->pg_count = 0;
- desc->pg_base = 0;
+ if (list_empty(&mirror->pg_list)) {
+ mirror->pg_count = 0;
+ mirror->pg_base = 0;
}
}
@@ -911,6 +1028,8 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_page *subreq;
unsigned int bytes_left = 0;
unsigned int offset, pgbase;
@@ -934,7 +1053,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
nfs_pageio_doio(desc);
if (desc->pg_error < 0)
return 0;
- if (desc->pg_recoalesce)
+ if (mirror->pg_recoalesce)
return 0;
/* retry add_request for this subreq */
nfs_page_group_lock(req, false);
@@ -972,14 +1091,16 @@ err_ptr:
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
LIST_HEAD(head);
do {
- list_splice_init(&desc->pg_list, &head);
- desc->pg_bytes_written -= desc->pg_count;
- desc->pg_count = 0;
- desc->pg_base = 0;
- desc->pg_recoalesce = 0;
+ list_splice_init(&mirror->pg_list, &head);
+ mirror->pg_bytes_written -= mirror->pg_count;
+ mirror->pg_count = 0;
+ mirror->pg_base = 0;
+ mirror->pg_recoalesce = 0;
+
desc->pg_moreio = 0;
while (!list_empty(&head)) {
@@ -993,11 +1114,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
return 0;
break;
}
- } while (desc->pg_recoalesce);
+ } while (mirror->pg_recoalesce);
return 1;
}
-int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
int ret;
@@ -1010,9 +1131,80 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
break;
ret = nfs_do_recoalesce(desc);
} while (ret);
+
return ret;
}
+int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
+{
+ u32 midx;
+ unsigned int pgbase, offset, bytes;
+ struct nfs_page *dupreq, *lastreq;
+
+ pgbase = req->wb_pgbase;
+ offset = req->wb_offset;
+ bytes = req->wb_bytes;
+
+ nfs_pageio_setup_mirroring(desc, req);
+
+ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+ if (midx) {
+ nfs_page_group_lock(req, false);
+
+ /* find the last request */
+ for (lastreq = req->wb_head;
+ lastreq->wb_this_page != req->wb_head;
+ lastreq = lastreq->wb_this_page)
+ ;
+
+ dupreq = nfs_create_request(req->wb_context,
+ req->wb_page, lastreq, pgbase, bytes);
+
+ if (IS_ERR(dupreq)) {
+ nfs_page_group_unlock(req);
+ return 0;
+ }
+
+ nfs_lock_request(dupreq);
+ nfs_page_group_unlock(req);
+ dupreq->wb_offset = offset;
+ dupreq->wb_index = req->wb_index;
+ } else
+ dupreq = req;
+
+ if (nfs_pgio_has_mirroring(desc))
+ desc->pg_mirror_idx = midx;
+ if (!nfs_pageio_add_request_mirror(desc, dupreq))
+ return 0;
+ }
+
+ return 1;
+}
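
nfs_pageio_add_request() becomes a fan-out: the original request feeds mirror 0, and every further mirror receives a duplicate created against the same page and spliced into the same page group, which is why the loop first walks wb_this_page to find the group's last member. The skeleton, with the duplication dance reduced to a stand-in helper:

	/* Sketch: one request instance per mirror. duplicate_into_group()
	 * stands in for the nfs_create_request()/nfs_lock_request()
	 * steps above. */
	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		struct nfs_page *r;

		if (midx == 0) {
			r = req;	/* mirror 0 consumes the original */
		} else {
			r = duplicate_into_group(req);
			if (r == NULL)
				return 0;
		}
		if (nfs_pgio_has_mirroring(desc))
			desc->pg_mirror_idx = midx;	/* route to mirror */
		if (!nfs_pageio_add_request_mirror(desc, r))
			return 0;
	}
	return 1;
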
+
+/*
+ * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
+ * nfs_pageio_descriptor
+ * @desc: pointer to io descriptor
+ */
+static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
+ u32 mirror_idx)
+{
+ struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
+ u32 restore_idx = desc->pg_mirror_idx;
+
+ if (nfs_pgio_has_mirroring(desc))
+ desc->pg_mirror_idx = mirror_idx;
+ for (;;) {
+ nfs_pageio_doio(desc);
+ if (!mirror->pg_recoalesce)
+ break;
+ if (!nfs_do_recoalesce(desc))
+ break;
+ }
+ desc->pg_mirror_idx = restore_idx;
+}
+
/*
* nfs_pageio_resend - Transfer requests to new descriptor and resend
* @hdr - the pgio header to move request from
@@ -1046,18 +1238,19 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
EXPORT_SYMBOL_GPL(nfs_pageio_resend);
/**
- * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
+ * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
* @desc: pointer to io descriptor
*/
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
- for (;;) {
- nfs_pageio_doio(desc);
- if (!desc->pg_recoalesce)
- break;
- if (!nfs_do_recoalesce(desc))
- break;
- }
+ u32 midx;
+
+ for (midx = 0; midx < desc->pg_mirror_count; midx++)
+ nfs_pageio_complete_mirror(desc, midx);
+
+ if (desc->pg_ops->pg_cleanup)
+ desc->pg_ops->pg_cleanup(desc);
+ nfs_pageio_cleanup_mirroring(desc);
}
/**
@@ -1073,10 +1266,17 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
*/
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
- if (!list_empty(&desc->pg_list)) {
- struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
- if (index != prev->wb_index + 1)
- nfs_pageio_complete(desc);
+ struct nfs_pgio_mirror *mirror;
+ struct nfs_page *prev;
+ u32 midx;
+
+ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+ mirror = &desc->pg_mirrors[midx];
+ if (!list_empty(&mirror->pg_list)) {
+ prev = nfs_list_entry(mirror->pg_list.prev);
+ if (index != prev->wb_index + 1)
+ nfs_pageio_complete_mirror(desc, midx);
+ }
}
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0a5dda4..4f802b0 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -34,6 +34,7 @@
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
+#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
@@ -50,6 +51,10 @@ static DEFINE_SPINLOCK(pnfs_spinlock);
*/
static LIST_HEAD(pnfs_modules_tbl);
+static int
+pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
+ enum pnfs_iomode iomode, bool sync);
+
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
@@ -238,6 +243,8 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
struct inode *inode = lo->plh_inode;
if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+ if (!list_empty(&lo->plh_segs))
+ WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
spin_unlock(&inode->i_lock);
pnfs_free_layout_hdr(lo);
@@ -337,6 +344,48 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
+/* Return true if layoutreturn is needed */
+static bool
+pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_segment *s;
+
+ if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+ return false;
+
+ list_for_each_entry(s, &lo->plh_segs, pls_list)
+ if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
+ return false;
+
+ return true;
+}
+
+static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
+ struct pnfs_layout_hdr *lo, struct inode *inode)
+{
+ lo = lseg->pls_layout;
+ inode = lo->plh_inode;
+
+ spin_lock(&inode->i_lock);
+ if (pnfs_layout_need_return(lo, lseg)) {
+ nfs4_stateid stateid;
+ enum pnfs_iomode iomode;
+
+ stateid = lo->plh_stateid;
+ iomode = lo->plh_return_iomode;
+ /* decreased in pnfs_send_layoutreturn() */
+ lo->plh_block_lgets++;
+ lo->plh_return_iomode = 0;
+ spin_unlock(&inode->i_lock);
+ pnfs_get_layout_hdr(lo);
+
+		/* Send an async layoutreturn so we don't deadlock */
+ pnfs_send_layoutreturn(lo, stateid, iomode, false);
+ } else
+ spin_unlock(&inode->i_lock);
+}
+
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
@@ -349,8 +398,17 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount),
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+
+ /* Handle the case where refcount != 1 */
+ if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
+ return;
+
lo = lseg->pls_layout;
inode = lo->plh_inode;
+ /* Do we need a layoutreturn? */
+ if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+ pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
+
if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
@@ -543,6 +601,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
pnfs_get_layout_hdr(lo);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
+ pnfs_clear_retry_layoutget(lo);
spin_unlock(&nfsi->vfs_inode.i_lock);
pnfs_free_lseg_list(&tmp_list);
pnfs_put_layout_hdr(lo);
@@ -740,25 +799,37 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
+static bool
+pnfs_layout_returning(const struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range)
+{
+ return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
+ (lo->plh_return_iomode == IOMODE_ANY ||
+ lo->plh_return_iomode == range->iomode);
+}
+
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
-pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
+pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range, int lget)
{
return lo->plh_block_lgets ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
(list_empty(&lo->plh_segs) &&
- (atomic_read(&lo->plh_outstanding) > lget));
+ (atomic_read(&lo->plh_outstanding) > lget)) ||
+ pnfs_layout_returning(lo, range);
}
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range,
struct nfs4_state *open_state)
{
int status = 0;
dprintk("--> %s\n", __func__);
spin_lock(&lo->plh_inode->i_lock);
- if (pnfs_layoutgets_blocked(lo, 1)) {
+ if (pnfs_layoutgets_blocked(lo, range, 1)) {
status = -EAGAIN;
} else if (!nfs4_valid_open_stateid(open_state)) {
status = -EBADF;
@@ -825,7 +896,9 @@ send_layoutget(struct pnfs_layout_hdr *lo,
pnfs_layout_io_set_failed(lo, range->iomode);
}
return NULL;
- }
+ } else
+ pnfs_layout_clear_fail_bit(lo,
+ pnfs_iomode_to_fail_bit(range->iomode));
return lseg;
}
@@ -845,6 +918,49 @@ static void pnfs_clear_layoutcommit(struct inode *inode,
}
}
+void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
+{
+ clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+}
+
+static int
+pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
+ enum pnfs_iomode iomode, bool sync)
+{
+ struct inode *ino = lo->plh_inode;
+ struct nfs4_layoutreturn *lrp;
+ int status = 0;
+
+ lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
+ if (unlikely(lrp == NULL)) {
+ status = -ENOMEM;
+ spin_lock(&ino->i_lock);
+ lo->plh_block_lgets--;
+ pnfs_clear_layoutreturn_waitbit(lo);
+ rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
+ spin_unlock(&ino->i_lock);
+ pnfs_put_layout_hdr(lo);
+ goto out;
+ }
+
+ lrp->args.stateid = stateid;
+ lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
+ lrp->args.inode = ino;
+ lrp->args.range.iomode = iomode;
+ lrp->args.range.offset = 0;
+ lrp->args.range.length = NFS4_MAX_UINT64;
+ lrp->args.layout = lo;
+ lrp->clp = NFS_SERVER(ino)->nfs_client;
+ lrp->cred = lo->plh_lc_cred;
+
+ status = nfs4_proc_layoutreturn(lrp, sync);
+out:
+ dprintk("<-- %s status: %d\n", __func__, status);
+ return status;
+}
+
/*
* Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
* when the layout segment list is empty.
@@ -859,7 +975,6 @@ _pnfs_return_layout(struct inode *ino)
struct pnfs_layout_hdr *lo = NULL;
struct nfs_inode *nfsi = NFS_I(ino);
LIST_HEAD(tmp_list);
- struct nfs4_layoutreturn *lrp;
nfs4_stateid stateid;
int status = 0, empty;
@@ -901,24 +1016,7 @@ _pnfs_return_layout(struct inode *ino)
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
- lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
- if (unlikely(lrp == NULL)) {
- status = -ENOMEM;
- spin_lock(&ino->i_lock);
- lo->plh_block_lgets--;
- spin_unlock(&ino->i_lock);
- pnfs_put_layout_hdr(lo);
- goto out;
- }
-
- lrp->args.stateid = stateid;
- lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
- lrp->args.inode = ino;
- lrp->args.layout = lo;
- lrp->clp = NFS_SERVER(ino)->nfs_client;
- lrp->cred = lo->plh_lc_cred;
-
- status = nfs4_proc_layoutreturn(lrp);
+ status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
out:
dprintk("<-- %s status: %d\n", __func__, status);
return status;
@@ -954,31 +1052,60 @@ pnfs_commit_and_return_layout(struct inode *inode)
bool pnfs_roc(struct inode *ino)
{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct nfs_open_context *ctx;
+ struct nfs4_state *state;
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg, *tmp;
+ nfs4_stateid stateid;
LIST_HEAD(tmp_list);
- bool found = false;
+ bool found = false, layoutreturn = false;
spin_lock(&ino->i_lock);
- lo = NFS_I(ino)->layout;
+ lo = nfsi->layout;
if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
- goto out_nolayout;
+ goto out_noroc;
+
+ /* Don't return layout if we hold a delegation */
+ if (nfs4_check_delegation(ino, FMODE_READ))
+ goto out_noroc;
+
+ list_for_each_entry(ctx, &nfsi->open_files, list) {
+ state = ctx->state;
+ /* Don't return layout if there is open file state */
+ if (state != NULL && state->state != 0)
+ goto out_noroc;
+ }
+
+ pnfs_clear_retry_layoutget(lo);
list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
mark_lseg_invalid(lseg, &tmp_list);
found = true;
}
if (!found)
- goto out_nolayout;
+ goto out_noroc;
lo->plh_block_lgets++;
pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
return true;
-out_nolayout:
+out_noroc:
+ if (lo) {
+ stateid = lo->plh_stateid;
+ layoutreturn =
+ test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lo->plh_flags);
+ if (layoutreturn) {
+ lo->plh_block_lgets++;
+ pnfs_get_layout_hdr(lo);
+ }
+ }
spin_unlock(&ino->i_lock);
+ if (layoutreturn)
+ pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
return false;
}
@@ -1013,8 +1140,9 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
struct nfs_inode *nfsi = NFS_I(ino);
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg;
+ nfs4_stateid stateid;
u32 current_seqid;
- bool found = false;
+ bool found = false, layoutreturn = false;
spin_lock(&ino->i_lock);
list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
@@ -1031,7 +1159,21 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
*/
*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
+ if (!found) {
+ stateid = lo->plh_stateid;
+ layoutreturn =
+ test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lo->plh_flags);
+ if (layoutreturn) {
+ lo->plh_block_lgets++;
+ pnfs_get_layout_hdr(lo);
+ }
+ }
spin_unlock(&ino->i_lock);
+ if (layoutreturn) {
+ rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+ pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
+ }
return found;
}
@@ -1178,6 +1320,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+ !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
pnfs_lseg_range_match(&lseg->pls_range, range)) {
ret = pnfs_get_lseg(lseg);
break;
@@ -1266,6 +1409,35 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
return ret;
}
+/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
+static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
+{
+ if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
+ return 1;
+ return nfs_wait_bit_killable(key);
+}
+
+static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ /*
+ * send layoutcommit as it can hold up layoutreturn due to lseg
+ * reference
+ */
+ pnfs_layoutcommit_inode(lo->plh_inode, false);
+ return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
+ pnfs_layoutget_retry_bit_wait,
+ TASK_UNINTERRUPTIBLE);
+}
+
+static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
+{
+ unsigned long *bitlock = &lo->plh_flags;
+
+ clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
+ smp_mb__after_atomic();
+ wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
+}
+
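
pnfs_clear_first_layoutget(), like pnfs_clear_layoutreturn_waitbit() earlier in this file, follows the canonical wait_on_bit() protocol: clear with clear_bit_unlock(), force ordering with smp_mb__after_atomic(), then wake_up_bit(), so a waiter can never observe the wakeup without also observing the cleared bit. Both halves side by side:

	/* waker */
	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
	smp_mb__after_atomic();		/* clear visible before wakeup */
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET);

	/* waiter, in pnfs_update_layout() below */
	wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
		    TASK_UNINTERRUPTIBLE);
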
/*
 * Layout segment is retrieved from the server if not cached.
* The appropriate layout segment is referenced and returned to the caller.
@@ -1296,6 +1468,8 @@ pnfs_update_layout(struct inode *ino,
if (pnfs_within_mdsthreshold(ctx, ino, iomode))
goto out;
+lookup_again:
+ first = false;
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
@@ -1310,27 +1484,62 @@ pnfs_update_layout(struct inode *ino,
}
/* if LAYOUTGET already failed once we don't try again */
- if (pnfs_layout_io_test_failed(lo, iomode))
+ if (pnfs_layout_io_test_failed(lo, iomode) &&
+ !pnfs_should_retry_layoutget(lo))
goto out_unlock;
- /* Check to see if the layout for the given range already exists */
- lseg = pnfs_find_lseg(lo, &arg);
- if (lseg)
- goto out_unlock;
+ first = list_empty(&lo->plh_segs);
+ if (first) {
+ /* The first layoutget for the file. Need to serialize per
+ * RFC 5661 Errata 3208.
+ */
+ if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
+ &lo->plh_flags)) {
+ spin_unlock(&ino->i_lock);
+ wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
+ TASK_UNINTERRUPTIBLE);
+ pnfs_put_layout_hdr(lo);
+ goto lookup_again;
+ }
+ } else {
+ /* Check to see if the layout for the given range
+ * already exists
+ */
+ lseg = pnfs_find_lseg(lo, &arg);
+ if (lseg)
+ goto out_unlock;
+ }
+
+ /*
+ * Because we free lsegs before sending LAYOUTRETURN, we need to wait
+ * for LAYOUTRETURN even if first is true.
+ */
+ if (!lseg && pnfs_should_retry_layoutget(lo) &&
+ test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+ spin_unlock(&ino->i_lock);
+ dprintk("%s wait for layoutreturn\n", __func__);
+ if (pnfs_prepare_to_retry_layoutget(lo)) {
+ if (first)
+ pnfs_clear_first_layoutget(lo);
+ pnfs_put_layout_hdr(lo);
+ dprintk("%s retrying\n", __func__);
+ goto lookup_again;
+ }
+ goto out_put_layout_hdr;
+ }
- if (pnfs_layoutgets_blocked(lo, 0))
+ if (pnfs_layoutgets_blocked(lo, &arg, 0))
goto out_unlock;
atomic_inc(&lo->plh_outstanding);
-
- first = list_empty(&lo->plh_layouts) ? true : false;
spin_unlock(&ino->i_lock);
- if (first) {
+ if (list_empty(&lo->plh_layouts)) {
/* The lo must be on the clp list if there is any
* chance of a CB_LAYOUTRECALL(FILE) coming in.
*/
spin_lock(&clp->cl_lock);
- list_add_tail(&lo->plh_layouts, &server->layouts);
+ if (list_empty(&lo->plh_layouts))
+ list_add_tail(&lo->plh_layouts, &server->layouts);
spin_unlock(&clp->cl_lock);
}
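
Because the lookup_again loop can now race other tasks between dropping i_lock and reaching this point, the old first-time snapshot is replaced by a double-checked insertion onto the per-server list: a cheap unlocked list_empty() test, then an authoritative retest under cl_lock, so the header can never be linked twice. The idiom in isolation (list_empty() on the entry's own list_head tests "not on any list" here):

	if (list_empty(&lo->plh_layouts)) {
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))	/* still unlinked? */
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}
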
@@ -1343,8 +1552,11 @@ pnfs_update_layout(struct inode *ino,
arg.length = PAGE_CACHE_ALIGN(arg.length);
lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
+ pnfs_clear_retry_layoutget(lo);
atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
+ if (first)
+ pnfs_clear_first_layoutget(lo);
pnfs_put_layout_hdr(lo);
out:
dprintk("%s: inode %s/%llu pNFS layout segment %s for "
@@ -1393,7 +1605,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
goto out_forget_reply;
}
- if (pnfs_layoutgets_blocked(lo, 1)) {
+ if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) {
dprintk("%s forget reply due to state\n", __func__);
goto out_forget_reply;
}
@@ -1440,24 +1652,79 @@ out_forget_reply:
goto out;
}
+static void
+pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
+ struct list_head *tmp_list,
+ struct pnfs_layout_range *return_range)
+{
+ struct pnfs_layout_segment *lseg, *next;
+
+ dprintk("%s:Begin lo %p\n", __func__, lo);
+
+ if (list_empty(&lo->plh_segs))
+ return;
+
+ list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+ if (should_free_lseg(&lseg->pls_range, return_range)) {
+ dprintk("%s: marking lseg %p iomode %d "
+ "offset %llu length %llu\n", __func__,
+ lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset,
+ lseg->pls_range.length);
+ set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+ mark_lseg_invalid(lseg, tmp_list);
+ }
+}
+
+void pnfs_error_mark_layout_for_return(struct inode *inode,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
+ int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode);
+ struct pnfs_layout_range range = {
+ .iomode = lseg->pls_range.iomode,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+ LIST_HEAD(free_me);
+
+ spin_lock(&inode->i_lock);
+ /* set failure bit so that pnfs path will be retried later */
+ pnfs_layout_set_fail_bit(lo, iomode);
+ set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
+ if (lo->plh_return_iomode == 0)
+ lo->plh_return_iomode = range.iomode;
+ else if (lo->plh_return_iomode != range.iomode)
+ lo->plh_return_iomode = IOMODE_ANY;
+ /*
+ * mark all matching lsegs so that we are sure to have no live
+ * segments at hand when sending layoutreturn. See pnfs_put_lseg()
+ * for how it works.
+ */
+ pnfs_mark_matching_lsegs_return(lo, &free_me, &range);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&free_me);
+}
+EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
+
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
u64 rd_size = req->wb_bytes;
- WARN_ON_ONCE(pgio->pg_lseg != NULL);
-
- if (pgio->pg_dreq == NULL)
- rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
- else
- rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
-
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- req->wb_context,
- req_offset(req),
- rd_size,
- IOMODE_READ,
- GFP_KERNEL);
+ if (pgio->pg_lseg == NULL) {
+ if (pgio->pg_dreq == NULL)
+ rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
+ else
+ rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ req_offset(req),
+ rd_size,
+ IOMODE_READ,
+ GFP_KERNEL);
+ }
/* If no lseg, fall back to read through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_read_mds(pgio);
@@ -1469,27 +1736,36 @@ void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req, u64 wb_size)
{
- WARN_ON_ONCE(pgio->pg_lseg != NULL);
-
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- req->wb_context,
- req_offset(req),
- wb_size,
- IOMODE_RW,
- GFP_NOFS);
+ if (pgio->pg_lseg == NULL)
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ req_offset(req),
+ wb_size,
+ IOMODE_RW,
+ GFP_NOFS);
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
+void
+pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
+{
+ if (desc->pg_lseg) {
+ pnfs_put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
+
/*
* Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
* of bytes (maximum @req->wb_bytes) that can be coalesced.
*/
size_t
-pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
- struct nfs_page *req)
+pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *prev, struct nfs_page *req)
{
unsigned int size;
u64 seg_end, req_start, seg_left;
@@ -1513,10 +1789,16 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
pgio->pg_lseg->pls_range.length);
req_start = req_offset(req);
- WARN_ON_ONCE(req_start > seg_end);
+ WARN_ON_ONCE(req_start >= seg_end);
/* start of request is past the last byte of this segment */
- if (req_start >= seg_end)
+ if (req_start >= seg_end) {
+ /* reference the new lseg */
+ if (pgio->pg_ops->pg_cleanup)
+ pgio->pg_ops->pg_cleanup(pgio);
+ if (pgio->pg_ops->pg_init)
+ pgio->pg_ops->pg_init(pgio, req);
return 0;
+ }
/* adjust 'size' iff there are fewer bytes left in the
* segment than what nfs_generic_pg_test returned */
@@ -1571,10 +1853,12 @@ static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
- list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ list_splice_tail_init(&hdr->pages, &mirror->pg_list);
nfs_pageio_reset_write_mds(desc);
- desc->pg_recoalesce = 1;
+ mirror->pg_recoalesce = 1;
}
nfs_pgio_data_destroy(hdr);
}
@@ -1608,11 +1892,9 @@ pnfs_do_write(struct nfs_pageio_descriptor *desc,
struct pnfs_layout_segment *lseg = desc->pg_lseg;
enum pnfs_try_status trypnfs;
- desc->pg_lseg = NULL;
trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
if (trypnfs == PNFS_NOT_ATTEMPTED)
pnfs_write_through_mds(desc, hdr);
- pnfs_put_lseg(lseg);
}
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
@@ -1625,24 +1907,23 @@ EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_pgio_header *hdr;
int ret;
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
if (!hdr) {
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
return -ENOMEM;
}
nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
+
hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
ret = nfs_generic_pgio(desc, hdr);
- if (ret != 0) {
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
- } else
+ if (!ret)
pnfs_do_write(desc, hdr, desc->pg_ioflags);
+
return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
@@ -1687,10 +1968,12 @@ static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
- list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ list_splice_tail_init(&hdr->pages, &mirror->pg_list);
nfs_pageio_reset_read_mds(desc);
- desc->pg_recoalesce = 1;
+ mirror->pg_recoalesce = 1;
}
nfs_pgio_data_destroy(hdr);
}
@@ -1719,18 +2002,29 @@ pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
return trypnfs;
}
+/* Resend all requests through pnfs. */
+int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
+{
+ struct nfs_pageio_descriptor pgio;
+
+ nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
+ return nfs_pageio_resend(&pgio, hdr);
+}
+EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
+
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
struct pnfs_layout_segment *lseg = desc->pg_lseg;
enum pnfs_try_status trypnfs;
+ int err = 0;
- desc->pg_lseg = NULL;
trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
- if (trypnfs == PNFS_NOT_ATTEMPTED)
+ if (trypnfs == PNFS_TRY_AGAIN)
+ err = pnfs_read_resend_pnfs(hdr);
+ if (trypnfs == PNFS_NOT_ATTEMPTED || err)
pnfs_read_through_mds(desc, hdr);
- pnfs_put_lseg(lseg);
}
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
@@ -1743,24 +2037,20 @@ EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+
struct nfs_pgio_header *hdr;
int ret;
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
if (!hdr) {
- desc->pg_completion_ops->error_cleanup(&desc->pg_list);
- ret = -ENOMEM;
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
- return ret;
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+ return -ENOMEM;
}
nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
ret = nfs_generic_pgio(desc, hdr);
- if (ret != 0) {
- pnfs_put_lseg(desc->pg_lseg);
- desc->pg_lseg = NULL;
- } else
+ if (!ret)
pnfs_do_read(desc, hdr);
return ret;
}
@@ -1966,6 +2256,7 @@ clear_layoutcommitting:
pnfs_clear_layoutcommitting(inode);
goto out;
}
+EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 9ae5b76..797cd62 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -38,6 +38,25 @@ enum {
NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */
NFS_LSEG_ROC, /* roc bit received from server */
NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */
+ NFS_LSEG_LAYOUTRETURN, /* layoutreturn bit set for layoutreturn */
+};
+
+/* Individual IP address */
+struct nfs4_pnfs_ds_addr {
+ struct sockaddr_storage da_addr;
+ size_t da_addrlen;
+ struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ char *da_remotestr; /* human readable addr+port */
+};
+
+struct nfs4_pnfs_ds {
+ struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ char *ds_remotestr; /* comma sep list of addrs */
+ struct list_head ds_addrs;
+ struct nfs_client *ds_clp;
+ atomic_t ds_count;
+ unsigned long ds_state;
+#define NFS4DS_CONNECTING 0 /* ds is establishing connection */
};
struct pnfs_layout_segment {
@@ -53,19 +72,34 @@ struct pnfs_layout_segment {
enum pnfs_try_status {
PNFS_ATTEMPTED = 0,
PNFS_NOT_ATTEMPTED = 1,
+ PNFS_TRY_AGAIN = 2,
};
#ifdef CONFIG_NFS_V4_1
#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
+/*
+ * Default data server connection timeout and retrans values.
+ * Set by module parameters dataserver_timeo and dataserver_retrans.
+ */
+#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
+#define NFS4_DEF_DS_RETRANS 5
+
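
These defaults are in tenths of a second, so out of the box a data server RPC
times out after a minute and is retried five times. A trivial check of the
unit conversion (assuming the usual tenths-of-a-second convention for these
knobs):

    unsigned int timeo_ms = NFS4_DEF_DS_TIMEO * 100;  /* 600 tenths -> 60000 ms */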
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+
enum {
NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */
NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
NFS_LAYOUT_ROC, /* some lseg had roc bit set */
NFS_LAYOUT_RETURN, /* Return this layout ASAP */
+ NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */
NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */
+ NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */
+ NFS_LAYOUT_RETRY_LAYOUTGET, /* Retry layoutget */
};
enum layoutdriver_policy_flags {
@@ -106,7 +140,8 @@ struct pnfs_layoutdriver_type {
struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
void (*mark_request_commit) (struct nfs_page *req,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo);
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx);
void (*clear_request_commit) (struct nfs_page *req,
struct nfs_commit_info *cinfo);
int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
@@ -154,6 +189,7 @@ struct pnfs_layout_hdr {
u32 plh_barrier; /* ignore lower seqids */
unsigned long plh_retry_timestamp;
unsigned long plh_flags;
+ enum pnfs_iomode plh_return_iomode;
loff_t plh_lwb; /* last write byte for layoutcommit */
struct rpc_cred *plh_lc_cred; /* layoutcommit cred */
struct inode *plh_inode;
@@ -185,7 +221,7 @@ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev,
struct rpc_cred *cred);
extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
-extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
+extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
/* pnfs.c */
void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo);
@@ -198,6 +234,7 @@ void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *
int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req, u64 wb_size);
+void pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *);
int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
size_t pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
struct nfs_page *prev, struct nfs_page *req);
@@ -217,6 +254,7 @@ void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
bool update_barrier);
int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range,
struct nfs4_state *open_state);
int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
@@ -233,17 +271,21 @@ int _pnfs_return_layout(struct inode *);
int pnfs_commit_and_return_layout(struct inode *);
void pnfs_ld_write_done(struct nfs_pgio_header *);
void pnfs_ld_read_done(struct nfs_pgio_header *);
+int pnfs_read_resend_pnfs(struct nfs_pgio_header *);
struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
struct nfs_open_context *ctx,
loff_t pos,
u64 count,
enum pnfs_iomode iomode,
gfp_t gfp_flags);
+void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo);
void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *);
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
+void pnfs_error_mark_layout_for_return(struct inode *inode,
+ struct pnfs_layout_segment *lseg);
/* nfs4_deviceid_flags */
enum {
@@ -275,6 +317,39 @@ void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node);
bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node);
void nfs4_deviceid_purge_client(const struct nfs_client *);
+/* pnfs_nfs.c */
+void pnfs_generic_clear_request_commit(struct nfs_page *req,
+ struct nfs_commit_info *cinfo);
+void pnfs_generic_commit_release(void *calldata);
+void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data);
+void pnfs_generic_rw_release(void *data);
+void pnfs_generic_recover_commit_reqs(struct list_head *dst,
+ struct nfs_commit_info *cinfo);
+int pnfs_generic_commit_pagelist(struct inode *inode,
+ struct list_head *mds_pages,
+ int how,
+ struct nfs_commit_info *cinfo,
+ int (*initiate_commit)(struct nfs_commit_data *data,
+ int how));
+int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max);
+void pnfs_generic_write_commit_done(struct rpc_task *task, void *data);
+void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
+struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
+ gfp_t gfp_flags);
+void nfs4_pnfs_v3_ds_connect_unload(void);
+void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+ struct nfs4_deviceid_node *devid, unsigned int timeo,
+ unsigned int retrans, u32 version, u32 minor_version,
+ rpc_authflavor_t au_flavor);
+struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
+ struct xdr_stream *xdr,
+ gfp_t gfp_flags);
+
+static inline bool nfs_have_layout(struct inode *inode)
+{
+ return NFS_I(inode)->layout != NULL;
+}
+
static inline struct nfs4_deviceid_node *
nfs4_get_deviceid(struct nfs4_deviceid_node *d)
{
@@ -282,6 +357,26 @@ nfs4_get_deviceid(struct nfs4_deviceid_node *d)
return d;
}
+static inline void pnfs_set_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ if (!test_and_set_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags))
+ atomic_inc(&lo->plh_refcount);
+}
+
+static inline void pnfs_clear_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ if (test_and_clear_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags)) {
+ atomic_dec(&lo->plh_refcount);
+ /* wake up waiters on LAYOUTRETURN, since it is no longer needed */
+ wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+ }
+}
+
+static inline bool pnfs_should_retry_layoutget(struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags);
+}
+
static inline struct pnfs_layout_segment *
pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{
@@ -317,16 +412,22 @@ pnfs_get_ds_info(struct inode *inode)
return ld->get_ds_info(inode);
}
+static inline void
+pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node)
+{
+ set_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
static inline bool
pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
struct inode *inode = req->wb_context->dentry->d_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (lseg == NULL || ld->mark_request_commit == NULL)
return false;
- ld->mark_request_commit(req, lseg, cinfo);
+ ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx);
return true;
}
@@ -352,15 +453,6 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
}
-static inline void
-pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
- struct nfs_commit_info *cinfo)
-{
- if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
- return;
- NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
-}
-
static inline struct nfs_page *
pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
struct page *page)
@@ -427,6 +519,11 @@ static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id)
#endif /* NFS_DEBUG */
#else /* CONFIG_NFS_V4_1 */
+static inline bool nfs_have_layout(struct inode *inode)
+{
+ return false;
+}
+
static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
{
}
@@ -513,7 +610,7 @@ pnfs_get_ds_info(struct inode *inode)
static inline bool
pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
return false;
}
@@ -531,12 +628,6 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
return 0;
}
-static inline void
-pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
- struct nfs_commit_info *cinfo)
-{
-}
-
static inline struct nfs_page *
pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
struct page *page)
@@ -568,6 +659,10 @@ static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
return NULL;
}
+static inline void nfs4_pnfs_v3_ds_connect_unload(void)
+{
+}
+
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
new file mode 100644
index 0000000..fdc4f65
--- /dev/null
+++ b/fs/nfs/pnfs_nfs.c
@@ -0,0 +1,840 @@
+/*
+ * Common NFS I/O operations for the pNFS file-based
+ * layout drivers.
+ *
+ * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
+ *
+ * Tom Haynes <loghyr@primarydata.com>
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/module.h>
+
+#include "nfs4session.h"
+#include "internal.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
+
+void pnfs_generic_rw_release(void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ nfs_put_client(hdr->ds_clp);
+ hdr->mds_ops->rpc_release(data);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
+
+/* Fake up some data that will cause nfs_commit_release to retry the writes. */
+void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
+{
+ struct nfs_page *first = nfs_list_entry(data->pages.next);
+
+ data->task.tk_status = 0;
+ memcpy(&data->verf.verifier, &first->wb_verf,
+ sizeof(data->verf.verifier));
+ data->verf.verifier.data[0]++; /* ensure verifier mismatch */
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
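
The trick above relies on the commit completion path comparing each request's
saved write verifier with the verifier carried by the commit; corrupting one
byte guarantees a mismatch, so every request is rescheduled for writing. A
hedged sketch of the check being defeated (the exact shape of the comparison
in the completion path is assumed here):

    if (memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf)))
            nfs_mark_request_dirty(req);    /* verifier changed: rewrite */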
+
+void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
+
+ /* Note this may cause RPC to be resent */
+ wdata->mds_ops->rpc_call_done(task, data);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
+
+void pnfs_generic_commit_release(void *calldata)
+{
+ struct nfs_commit_data *data = calldata;
+
+ data->completion_ops->completion(data);
+ pnfs_put_lseg(data->lseg);
+ nfs_put_client(data->ds_clp);
+ nfs_commitdata_release(data);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
+
+/* The generic layer is about to remove the req from the commit list.
+ * If this will make the bucket empty, it will need to put the lseg reference.
+ * Note this must be called while holding the inode (/cinfo) lock.
+ */
+void
+pnfs_generic_clear_request_commit(struct nfs_page *req,
+ struct nfs_commit_info *cinfo)
+{
+ struct pnfs_layout_segment *freeme = NULL;
+
+ if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
+ goto out;
+ cinfo->ds->nwritten--;
+ if (list_is_singular(&req->wb_list)) {
+ struct pnfs_commit_bucket *bucket;
+
+ bucket = list_first_entry(&req->wb_list,
+ struct pnfs_commit_bucket,
+ written);
+ freeme = bucket->wlseg;
+ bucket->wlseg = NULL;
+ }
+out:
+ nfs_request_remove_commit_list(req, cinfo);
+ pnfs_put_lseg_locked(freeme);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
+
+static int
+pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max)
+{
+ struct nfs_page *req, *tmp;
+ int ret = 0;
+
+ list_for_each_entry_safe(req, tmp, src, wb_list) {
+ if (!nfs_lock_request(req))
+ continue;
+ kref_get(&req->wb_kref);
+ if (cond_resched_lock(cinfo->lock))
+ list_safe_reset_next(req, tmp, wb_list);
+ nfs_request_remove_commit_list(req, cinfo);
+ clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ nfs_list_add_request(req, dst);
+ ret++;
+ if ((ret == max) && !cinfo->dreq)
+ break;
+ }
+ return ret;
+}
+
+static int
+pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo,
+ int max)
+{
+ struct list_head *src = &bucket->written;
+ struct list_head *dst = &bucket->committing;
+ int ret;
+
+ lockdep_assert_held(cinfo->lock);
+ ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
+ if (ret) {
+ cinfo->ds->nwritten -= ret;
+ cinfo->ds->ncommitting += ret;
+ bucket->clseg = bucket->wlseg;
+ if (list_empty(src))
+ bucket->wlseg = NULL;
+ else
+ pnfs_get_lseg(bucket->clseg);
+ }
+ return ret;
+}
+
+/* Move reqs from written to committing lists, returning count
+ * of number moved.
+ */
+int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
+ int max)
+{
+ int i, rv = 0, cnt;
+
+ lockdep_assert_held(cinfo->lock);
+ for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
+ cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
+ cinfo, max);
+ max -= cnt;
+ rv += cnt;
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);
+
+/* Pull everything off the written lists and dump into @dst. */
+void pnfs_generic_recover_commit_reqs(struct list_head *dst,
+ struct nfs_commit_info *cinfo)
+{
+ struct pnfs_commit_bucket *b;
+ struct pnfs_layout_segment *freeme;
+ int i;
+
+ lockdep_assert_held(cinfo->lock);
+restart:
+ for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+ if (pnfs_generic_transfer_commit_list(&b->written, dst,
+ cinfo, 0)) {
+ freeme = b->wlseg;
+ b->wlseg = NULL;
+ spin_unlock(cinfo->lock);
+ pnfs_put_lseg(freeme);
+ spin_lock(cinfo->lock);
+ goto restart;
+ }
+ }
+ cinfo->ds->nwritten = 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
+
+static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
+{
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+ struct pnfs_commit_bucket *bucket;
+ struct pnfs_layout_segment *freeme;
+ int i;
+
+ for (i = idx; i < fl_cinfo->nbuckets; i++) {
+ bucket = &fl_cinfo->buckets[i];
+ if (list_empty(&bucket->committing))
+ continue;
+ nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
+ spin_lock(cinfo->lock);
+ freeme = bucket->clseg;
+ bucket->clseg = NULL;
+ spin_unlock(cinfo->lock);
+ pnfs_put_lseg(freeme);
+ }
+}
+
+static unsigned int
+pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
+ struct list_head *list)
+{
+ struct pnfs_ds_commit_info *fl_cinfo;
+ struct pnfs_commit_bucket *bucket;
+ struct nfs_commit_data *data;
+ int i;
+ unsigned int nreq = 0;
+
+ fl_cinfo = cinfo->ds;
+ bucket = fl_cinfo->buckets;
+ for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
+ if (list_empty(&bucket->committing))
+ continue;
+ data = nfs_commitdata_alloc();
+ if (!data)
+ break;
+ data->ds_commit_index = i;
+ spin_lock(cinfo->lock);
+ data->lseg = bucket->clseg;
+ bucket->clseg = NULL;
+ spin_unlock(cinfo->lock);
+ list_add(&data->pages, list);
+ nreq++;
+ }
+
+ /* Requeue buckets we could not allocate commit data for */
+ pnfs_generic_retry_commit(cinfo, i);
+ return nreq;
+}
+
+/* This follows nfs_commit_list pretty closely */
+int
+pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ int how, struct nfs_commit_info *cinfo,
+ int (*initiate_commit)(struct nfs_commit_data *data,
+ int how))
+{
+ struct nfs_commit_data *data, *tmp;
+ LIST_HEAD(list);
+ unsigned int nreq = 0;
+
+ if (!list_empty(mds_pages)) {
+ data = nfs_commitdata_alloc();
+ if (data != NULL) {
+ data->lseg = NULL;
+ list_add(&data->pages, &list);
+ nreq++;
+ } else {
+ nfs_retry_commit(mds_pages, NULL, cinfo, 0);
+ pnfs_generic_retry_commit(cinfo, 0);
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
+ return -ENOMEM;
+ }
+ }
+
+ nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);
+
+ if (nreq == 0) {
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
+ goto out;
+ }
+
+ atomic_add(nreq, &cinfo->mds->rpcs_out);
+
+ list_for_each_entry_safe(data, tmp, &list, pages) {
+ list_del_init(&data->pages);
+ if (!data->lseg) {
+ nfs_init_commit(data, mds_pages, NULL, cinfo);
+ nfs_initiate_commit(NFS_CLIENT(inode), data,
+ NFS_PROTO(data->inode),
+ data->mds_ops, how, 0);
+ } else {
+ struct pnfs_commit_bucket *buckets;
+
+ buckets = cinfo->ds->buckets;
+ nfs_init_commit(data,
+ &buckets[data->ds_commit_index].committing,
+ data->lseg,
+ cinfo);
+ initiate_commit(data, how);
+ }
+ }
+out:
+ cinfo->ds->ncommitting = 0;
+ return PNFS_ATTEMPTED;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
+
+/*
+ * Data server cache
+ *
+ * Data servers can be mapped to different device ids.
+ * nfs4_pnfs_ds reference counting
+ * - set to 1 on allocation
+ * - incremented when a device id maps a data server already in the cache.
+ * - decremented when deviceid is removed from the cache.
+ */
+static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
+static LIST_HEAD(nfs4_data_server_cache);
+
+/* Debug routines */
+static void
+print_ds(struct nfs4_pnfs_ds *ds)
+{
+ if (ds == NULL) {
+ printk(KERN_WARNING "%s NULL device\n", __func__);
+ return;
+ }
+ printk(KERN_WARNING " ds %s\n"
+ " ref count %d\n"
+ " client %p\n"
+ " cl_exchange_flags %x\n",
+ ds->ds_remotestr,
+ atomic_read(&ds->ds_count), ds->ds_clp,
+ ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
+}
+
+static bool
+same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
+{
+ struct sockaddr_in *a, *b;
+ struct sockaddr_in6 *a6, *b6;
+
+ if (addr1->sa_family != addr2->sa_family)
+ return false;
+
+ switch (addr1->sa_family) {
+ case AF_INET:
+ a = (struct sockaddr_in *)addr1;
+ b = (struct sockaddr_in *)addr2;
+
+ if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
+ a->sin_port == b->sin_port)
+ return true;
+ break;
+
+ case AF_INET6:
+ a6 = (struct sockaddr_in6 *)addr1;
+ b6 = (struct sockaddr_in6 *)addr2;
+
+ /* LINKLOCAL addresses must have matching scope_id */
+ if (ipv6_addr_src_scope(&a6->sin6_addr) ==
+ IPV6_ADDR_SCOPE_LINKLOCAL &&
+ a6->sin6_scope_id != b6->sin6_scope_id)
+ return false;
+
+ if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
+ a6->sin6_port == b6->sin6_port)
+ return true;
+ break;
+
+ default:
+ dprintk("%s: unhandled address family: %u\n",
+ __func__, addr1->sa_family);
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
+ const struct list_head *dsaddrs2)
+{
+ struct nfs4_pnfs_ds_addr *da1, *da2;
+
+ /* step through both lists, comparing as we go */
+ for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
+ da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
+ da1 != NULL && da2 != NULL;
+ da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
+ da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
+ if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
+ (struct sockaddr *)&da2->da_addr))
+ return false;
+ }
+ if (da1 == NULL && da2 == NULL)
+ return true;
+
+ return false;
+}
+
+/*
+ * Lookup DS by addresses. nfs4_ds_cache_lock is held
+ */
+static struct nfs4_pnfs_ds *
+_data_server_lookup_locked(const struct list_head *dsaddrs)
+{
+ struct nfs4_pnfs_ds *ds;
+
+ list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+ if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
+ return ds;
+ return NULL;
+}
+
+static void destroy_ds(struct nfs4_pnfs_ds *ds)
+{
+ struct nfs4_pnfs_ds_addr *da;
+
+ dprintk("--> %s\n", __func__);
+ ifdebug(FACILITY)
+ print_ds(ds);
+
+ nfs_put_client(ds->ds_clp);
+
+ while (!list_empty(&ds->ds_addrs)) {
+ da = list_first_entry(&ds->ds_addrs,
+ struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+
+ kfree(ds->ds_remotestr);
+ kfree(ds);
+}
+
+void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
+{
+ if (atomic_dec_and_lock(&ds->ds_count,
+ &nfs4_ds_cache_lock)) {
+ list_del_init(&ds->ds_node);
+ spin_unlock(&nfs4_ds_cache_lock);
+ destroy_ds(ds);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);
+
+/*
+ * Create a string with a human-readable address and port to avoid
+ * complicated setup around many dprintks.
+ */
+static char *
+nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds_addr *da;
+ char *remotestr;
+ size_t len;
+ char *p;
+
+ len = 3; /* '{', '}' and trailing NUL */
+ list_for_each_entry(da, dsaddrs, da_node) {
+ len += strlen(da->da_remotestr) + 1; /* string plus comma */
+ }
+
+ remotestr = kzalloc(len, gfp_flags);
+ if (!remotestr)
+ return NULL;
+
+ p = remotestr;
+ *(p++) = '{';
+ len--;
+ list_for_each_entry(da, dsaddrs, da_node) {
+ size_t ll = strlen(da->da_remotestr);
+
+ if (ll > len)
+ goto out_err;
+
+ memcpy(p, da->da_remotestr, ll);
+ p += ll;
+ len -= ll;
+
+ if (len < 1)
+ goto out_err;
+ (*p++) = ',';
+ len--;
+ }
+ if (len < 2)
+ goto out_err;
+ *(p++) = '}';
+ *p = '\0';
+ return remotestr;
+out_err:
+ kfree(remotestr);
+ return NULL;
+}
+
+/*
+ * Given a list of multipath addresses (struct nfs4_pnfs_ds_addr), add the
+ * data server to the cache if it is not already there and return the
+ * cached struct nfs4_pnfs_ds.
+ */
+struct nfs4_pnfs_ds *
+nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
+ char *remotestr;
+
+ if (list_empty(dsaddrs)) {
+ dprintk("%s: no addresses defined\n", __func__);
+ goto out;
+ }
+
+ ds = kzalloc(sizeof(*ds), gfp_flags);
+ if (!ds)
+ goto out;
+
+ /* this is only used for debugging, so it's ok if it's NULL */
+ remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
+
+ spin_lock(&nfs4_ds_cache_lock);
+ tmp_ds = _data_server_lookup_locked(dsaddrs);
+ if (tmp_ds == NULL) {
+ INIT_LIST_HEAD(&ds->ds_addrs);
+ list_splice_init(dsaddrs, &ds->ds_addrs);
+ ds->ds_remotestr = remotestr;
+ atomic_set(&ds->ds_count, 1);
+ INIT_LIST_HEAD(&ds->ds_node);
+ ds->ds_clp = NULL;
+ list_add(&ds->ds_node, &nfs4_data_server_cache);
+ dprintk("%s add new data server %s\n", __func__,
+ ds->ds_remotestr);
+ } else {
+ kfree(remotestr);
+ kfree(ds);
+ atomic_inc(&tmp_ds->ds_count);
+ dprintk("%s data server %s found, inc'ed ds_count to %d\n",
+ __func__, tmp_ds->ds_remotestr,
+ atomic_read(&tmp_ds->ds_count));
+ ds = tmp_ds;
+ }
+ spin_unlock(&nfs4_ds_cache_lock);
+out:
+ return ds;
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
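
Taken together with nfs4_pnfs_ds_put() above, a file-based layout driver would
use the cache roughly as follows when decoding a device (a hedged sketch; net
and xdr come from the caller's GETDEVICEINFO decode context, and error
handling is elided):

    LIST_HEAD(dsaddrs);
    struct nfs4_pnfs_ds_addr *da;
    struct nfs4_pnfs_ds *ds;

    da = nfs4_decode_mp_ds_addr(net, xdr, GFP_KERNEL);  /* one multipath addr */
    if (da)
            list_add_tail(&da->da_node, &dsaddrs);

    ds = nfs4_pnfs_ds_add(&dsaddrs, GFP_KERNEL);  /* new entry, or ds_count++ */
    if (ds) {
            /* ... issue I/O through ds ... */
            nfs4_pnfs_ds_put(ds);           /* destroyed on the last put */
    }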
+
+static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
+{
+ might_sleep();
+ wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
+ TASK_KILLABLE);
+}
+
+static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
+{
+ smp_mb__before_atomic();
+ clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
+ smp_mb__after_atomic();
+ wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
+}
+
+static struct nfs_client *(*get_v3_ds_connect)(
+ struct nfs_client *mds_clp,
+ const struct sockaddr *ds_addr,
+ int ds_addrlen,
+ int ds_proto,
+ unsigned int ds_timeo,
+ unsigned int ds_retrans,
+ rpc_authflavor_t au_flavor);
+
+static bool load_v3_ds_connect(void)
+{
+ if (!get_v3_ds_connect) {
+ get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
+ WARN_ON_ONCE(!get_v3_ds_connect);
+ }
+
+ return get_v3_ds_connect != NULL;
+}
+
+void __exit nfs4_pnfs_v3_ds_connect_unload(void)
+{
+ if (get_v3_ds_connect) {
+ symbol_put(nfs3_set_ds_client);
+ get_v3_ds_connect = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);
+
+static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
+ struct nfs4_pnfs_ds *ds,
+ unsigned int timeo,
+ unsigned int retrans,
+ rpc_authflavor_t au_flavor)
+{
+ struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs4_pnfs_ds_addr *da;
+ int status = 0;
+
+ dprintk("--> %s DS %s au_flavor %d\n", __func__,
+ ds->ds_remotestr, au_flavor);
+
+ if (!load_v3_ds_connect())
+ goto out;
+
+ list_for_each_entry(da, &ds->ds_addrs, da_node) {
+ dprintk("%s: DS %s: trying address %s\n",
+ __func__, ds->ds_remotestr, da->da_remotestr);
+
+ clp = get_v3_ds_connect(mds_srv->nfs_client,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ timeo, retrans, au_flavor);
+ if (!IS_ERR(clp))
+ break;
+ }
+
+ if (IS_ERR(clp)) {
+ status = PTR_ERR(clp);
+ goto out;
+ }
+
+ smp_wmb();
+ ds->ds_clp = clp;
+ dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
+out:
+ return status;
+}
+
+static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ struct nfs4_pnfs_ds *ds,
+ unsigned int timeo,
+ unsigned int retrans,
+ u32 minor_version,
+ rpc_authflavor_t au_flavor)
+{
+ struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs4_pnfs_ds_addr *da;
+ int status = 0;
+
+ dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
+ au_flavor);
+
+ list_for_each_entry(da, &ds->ds_addrs, da_node) {
+ dprintk("%s: DS %s: trying address %s\n",
+ __func__, ds->ds_remotestr, da->da_remotestr);
+
+ clp = nfs4_set_ds_client(mds_srv->nfs_client,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ timeo, retrans, minor_version,
+ au_flavor);
+ if (!IS_ERR(clp))
+ break;
+ }
+
+ if (IS_ERR(clp)) {
+ status = PTR_ERR(clp);
+ goto out;
+ }
+
+ status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
+ if (status)
+ goto out_put;
+
+ smp_wmb();
+ ds->ds_clp = clp;
+ dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
+out:
+ return status;
+out_put:
+ nfs_put_client(clp);
+ goto out;
+}
+
+/*
+ * Create an RPC connection to the nfs4_pnfs_ds data server.
+ * Currently only IPv4 and IPv6 addresses are supported.
+ * If the connection fails, the devid is marked unavailable.
+ */
+void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+ struct nfs4_deviceid_node *devid, unsigned int timeo,
+ unsigned int retrans, u32 version,
+ u32 minor_version, rpc_authflavor_t au_flavor)
+{
+ if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
+ int err = 0;
+
+ if (version == 3) {
+ err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
+ retrans, au_flavor);
+ } else if (version == 4) {
+ err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
+ retrans, minor_version,
+ au_flavor);
+ } else {
+ dprintk("%s: unsupported DS version %d\n", __func__,
+ version);
+ err = -EPROTONOSUPPORT;
+ }
+
+ if (err)
+ nfs4_mark_deviceid_unavailable(devid);
+ nfs4_clear_ds_conn_bit(ds);
+ } else {
+ nfs4_wait_ds_connect(ds);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
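
A layout driver typically calls this the first time a data server is used for
I/O, for example (a hedged sketch: dataserver_timeo and dataserver_retrans are
the module parameters mentioned in pnfs.h, and the version/flavor arguments
are illustrative values for an NFSv4.1 data server):

    nfs4_pnfs_ds_connect(mds_srv, ds, devid, dataserver_timeo,
                         dataserver_retrans, 4, 1, RPC_AUTH_UNIX);
    if (!ds->ds_clp)
            return NULL;    /* connect failed; devid is already unavailable */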
+
+/*
+ * Currently only supports IPv4, IPv6 and one multipath address.
+ */
+struct nfs4_pnfs_ds_addr *
+nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds_addr *da = NULL;
+ char *buf, *portstr;
+ __be16 port;
+ int nlen, rlen;
+ int tmp[2];
+ __be32 *p;
+ char *netid, *match_netid;
+ size_t len, match_netid_len;
+ char *startsep = "";
+ char *endsep = "";
+
+ /* r_netid */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_err;
+ nlen = be32_to_cpup(p++);
+
+ p = xdr_inline_decode(xdr, nlen);
+ if (unlikely(!p))
+ goto out_err;
+
+ netid = kmalloc(nlen+1, gfp_flags);
+ if (unlikely(!netid))
+ goto out_err;
+
+ netid[nlen] = '\0';
+ memcpy(netid, p, nlen);
+
+ /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_free_netid;
+ rlen = be32_to_cpup(p);
+
+ p = xdr_inline_decode(xdr, rlen);
+ if (unlikely(!p))
+ goto out_free_netid;
+
+ /* port is ".ABC.DEF", 8 chars max */
+ if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
+ dprintk("%s: Invalid address, length %d\n", __func__,
+ rlen);
+ goto out_free_netid;
+ }
+ buf = kmalloc(rlen + 1, gfp_flags);
+ if (!buf) {
+ dprintk("%s: Not enough memory\n", __func__);
+ goto out_free_netid;
+ }
+ buf[rlen] = '\0';
+ memcpy(buf, p, rlen);
+
+ /* replace port '.' with '-' */
+ portstr = strrchr(buf, '.');
+ if (!portstr) {
+ dprintk("%s: Failed finding expected dot in port\n",
+ __func__);
+ goto out_free_buf;
+ }
+ *portstr = '-';
+
+ /* find '.' between address and port */
+ portstr = strrchr(buf, '.');
+ if (!portstr) {
+ dprintk("%s: Failed finding expected dot between address and "
+ "port\n", __func__);
+ goto out_free_buf;
+ }
+ *portstr = '\0';
+
+ da = kzalloc(sizeof(*da), gfp_flags);
+ if (unlikely(!da))
+ goto out_free_buf;
+
+ INIT_LIST_HEAD(&da->da_node);
+
+ if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
+ sizeof(da->da_addr))) {
+ dprintk("%s: error parsing address %s\n", __func__, buf);
+ goto out_free_da;
+ }
+
+ portstr++;
+ sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
+ port = htons((tmp[0] << 8) | (tmp[1]));
+
+ switch (da->da_addr.ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)&da->da_addr)->sin_port = port;
+ da->da_addrlen = sizeof(struct sockaddr_in);
+ match_netid = "tcp";
+ match_netid_len = 3;
+ break;
+
+ case AF_INET6:
+ ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
+ da->da_addrlen = sizeof(struct sockaddr_in6);
+ match_netid = "tcp6";
+ match_netid_len = 4;
+ startsep = "[";
+ endsep = "]";
+ break;
+
+ default:
+ dprintk("%s: unsupported address family: %u\n",
+ __func__, da->da_addr.ss_family);
+ goto out_free_da;
+ }
+
+ if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
+ dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
+ __func__, netid, match_netid);
+ goto out_free_da;
+ }
+
+ /* save human readable address */
+ len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
+ da->da_remotestr = kzalloc(len, gfp_flags);
+
+ /* NULL is ok, only used for dprintk */
+ if (da->da_remotestr)
+ snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
+ buf, endsep, ntohs(port));
+
+ dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
+ kfree(buf);
+ kfree(netid);
+ return da;
+
+out_free_da:
+ kfree(da);
+out_free_buf:
+ dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
+ kfree(buf);
+out_free_netid:
+ kfree(netid);
+out_err:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
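
For reference, the r_addr string decoded above is the RFC 5665 universal
address, in which the last two dot-separated decimal octets encode the port.
A standalone worked example of the port arithmetic, matching the
htons((tmp[0] << 8) | tmp[1]) computation above:

    #include <stdio.h>

    int main(void)
    {
            /* "192.0.2.5.8.1" -> address "192.0.2.5", port octets 8 and 1 */
            int hi = 8, lo = 1;
            printf("port = %d\n", (hi << 8) | lo);  /* prints 2049 */
            return 0;
    }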
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c91a479..568ecf0 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -70,8 +70,15 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
+ struct nfs_pgio_mirror *mirror;
+
pgio->pg_ops = &nfs_pgio_rw_ops;
- pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
+
+ /* read path should never have more than one mirror */
+ WARN_ON_ONCE(pgio->pg_mirror_count != 1);
+
+ mirror = &pgio->pg_mirrors[0];
+ mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
@@ -81,6 +88,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
struct nfs_page *new;
unsigned int len;
struct nfs_pageio_descriptor pgio;
+ struct nfs_pgio_mirror *pgm;
len = nfs_page_length(page);
if (len == 0)
@@ -97,7 +105,13 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
&nfs_async_read_completion_ops);
nfs_pageio_add_request(&pgio, new);
nfs_pageio_complete(&pgio);
- NFS_I(inode)->read_io += pgio.pg_bytes_written;
+
+ /* It doesn't make sense to do mirrored reads! */
+ WARN_ON_ONCE(pgio.pg_mirror_count != 1);
+
+ pgm = &pgio.pg_mirrors[0];
+ NFS_I(inode)->read_io += pgm->pg_bytes_written;
+
return 0;
}
@@ -168,13 +182,14 @@ out:
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
struct rpc_message *msg,
+ const struct nfs_rpc_ops *rpc_ops,
struct rpc_task_setup *task_setup_data, int how)
{
struct inode *inode = hdr->inode;
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
task_setup_data->flags |= swap_flags;
- NFS_PROTO(inode)->read_setup(hdr, msg);
+ rpc_ops->read_setup(hdr, msg);
}
static void
@@ -351,6 +366,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct nfs_pageio_descriptor pgio;
+ struct nfs_pgio_mirror *pgm;
struct nfs_readdesc desc = {
.pgio = &pgio,
};
@@ -386,10 +402,15 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
&nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
-
nfs_pageio_complete(&pgio);
- NFS_I(inode)->read_io += pgio.pg_bytes_written;
- npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+ /* It doesn't make sense to do mirrored reads! */
+ WARN_ON_ONCE(pgio.pg_mirror_count != 1);
+
+ pgm = &pgio.pg_mirrors[0];
+ NFS_I(inode)->read_io += pgm->pg_bytes_written;
+ npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
put_nfs_open_context(desc.ctx);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 6ec4fe2..322b2de02 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -404,12 +404,15 @@ void __exit unregister_nfs_fs(void)
unregister_filesystem(&nfs_fs_type);
}
-void nfs_sb_active(struct super_block *sb)
+bool nfs_sb_active(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
- if (atomic_inc_return(&server->active) == 1)
- atomic_inc(&sb->s_active);
+ if (!atomic_inc_not_zero(&sb->s_active))
+ return false;
+ if (atomic_inc_return(&server->active) != 1)
+ atomic_dec(&sb->s_active);
+ return true;
}
EXPORT_SYMBOL_GPL(nfs_sb_active);
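
With atomic_inc_not_zero(), a reference is taken only while s_active is still
nonzero, so a superblock that has started tearing down can no longer be
revived; callers must now check the return value. A sketch of the resulting
calling convention (the caller code is illustrative):

    if (!nfs_sb_active(sb))
            return;                 /* sb is going away; leave it alone */
    /* ... safe to use sb here ... */
    nfs_sb_deactive(sb);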
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 298abcc..88a6d21 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -473,13 +473,18 @@ try_again:
do {
/*
* Subrequests are always contiguous, non overlapping
- * and in order. If not, it's a programming error.
+ * and in order - but may be repeated (mirrored writes).
*/
- WARN_ON_ONCE(subreq->wb_offset !=
- (head->wb_offset + total_bytes));
-
- /* keep track of how many bytes this group covers */
- total_bytes += subreq->wb_bytes;
+ if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
+ /* keep track of how many bytes this group covers */
+ total_bytes += subreq->wb_bytes;
+ } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
+ ((subreq->wb_offset + subreq->wb_bytes) >
+ (head->wb_offset + total_bytes)))) {
+ nfs_page_group_unlock(head);
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-EIO);
+ }
if (!nfs_lock_request(subreq)) {
/* releases page group bit lock and
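
The relaxed check above accepts exactly two cases: a subrequest that extends
the group contiguously, or one that repeats bytes already covered (a mirrored
write); anything else now fails hard with -EIO instead of only warning. A
small standalone model of that predicate (names are illustrative):

    #include <stdbool.h>

    static bool subreq_ok(unsigned long long head_off, unsigned long long total,
                          unsigned long long off, unsigned long long len)
    {
            if (off == head_off + total)
                    return true;                    /* contiguous extension */
            return off >= head_off &&
                   off + len <= head_off + total;   /* repeated (mirrored) bytes */
    }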
@@ -842,9 +847,9 @@ EXPORT_SYMBOL_GPL(nfs_init_cinfo);
*/
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
- if (pnfs_mark_request_commit(req, lseg, cinfo))
+ if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
return;
nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}
@@ -900,7 +905,8 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
}
if (nfs_write_need_commit(hdr)) {
memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
- nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo,
+ hdr->pgio_mirror_idx);
goto next;
}
remove_req:
@@ -1091,6 +1097,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct nfs_lock_context *l_ctx;
+ struct file_lock_context *flctx = file_inode(file)->i_flctx;
struct nfs_page *req;
int do_flush, status;
/*
@@ -1109,7 +1116,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
do_flush = req->wb_page != page || req->wb_context != ctx;
/* for now, flush if more than 1 request in page_group */
do_flush |= req->wb_this_page != req;
- if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+ if (l_ctx && flctx &&
+ !(list_empty_careful(&flctx->flc_posix) &&
+ list_empty_careful(&flctx->flc_flock))) {
do_flush |= l_ctx->lockowner.l_owner != current->files
|| l_ctx->lockowner.l_pid != current->tgid;
}
@@ -1170,6 +1179,13 @@ out:
return PageUptodate(page) != 0;
}
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+ return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+ fl->fl_type == F_WRLCK;
+}
+
/* If we know the page is up to date, and we're not using byte range locks (or
* if we have the whole file locked for writing), it may be more efficient to
* extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1196,36 @@ out:
*/
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
+ int ret;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock *fl;
+
if (file->f_flags & O_DSYNC)
return 0;
if (!nfs_write_pageuptodate(page, inode))
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
- if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
- inode->i_flock->fl_end == OFFSET_MAX &&
- inode->i_flock->fl_type != F_RDLCK))
- return 1;
- return 0;
+ if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+ list_empty_careful(&flctx->flc_posix)))
+ return 0;
+
+ /* Check to see if there are whole file write locks */
+ ret = 0;
+ spin_lock(&flctx->flc_lock);
+ if (!list_empty(&flctx->flc_posix)) {
+ fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+ fl_list);
+ if (is_whole_file_wrlock(fl))
+ ret = 1;
+ } else if (!list_empty(&flctx->flc_flock)) {
+ fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+ fl_list);
+ if (fl->fl_type == F_WRLCK)
+ ret = 1;
+ }
+ spin_unlock(&flctx->flc_lock);
+ return ret;
}
/*
@@ -1240,15 +1275,15 @@ static int flush_task_priority(int how)
static void nfs_initiate_write(struct nfs_pgio_header *hdr,
struct rpc_message *msg,
+ const struct nfs_rpc_ops *rpc_ops,
struct rpc_task_setup *task_setup_data, int how)
{
- struct inode *inode = hdr->inode;
int priority = flush_task_priority(how);
task_setup_data->priority = priority;
- NFS_PROTO(inode)->write_setup(hdr, msg);
+ rpc_ops->write_setup(hdr, msg);
- nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
+ nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
&task_setup_data->rpc_client, msg, hdr);
}
@@ -1298,8 +1333,14 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
+ struct nfs_pgio_mirror *mirror;
+
pgio->pg_ops = &nfs_pgio_rw_ops;
- pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
+
+ nfs_pageio_stop_mirroring(pgio);
+
+ mirror = &pgio->pg_mirrors[0];
+ mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
@@ -1465,6 +1506,7 @@ void nfs_commitdata_release(struct nfs_commit_data *data)
EXPORT_SYMBOL_GPL(nfs_commitdata_release);
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
+ const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
int how, int flags)
{
@@ -1486,7 +1528,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
.priority = priority,
};
/* Set up the initial task struct. */
- NFS_PROTO(data->inode)->commit_setup(data, &msg);
+ nfs_ops->commit_setup(data, &msg);
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
@@ -1554,14 +1596,15 @@ EXPORT_SYMBOL_GPL(nfs_init_commit);
void nfs_retry_commit(struct list_head *page_list,
struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo)
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx)
{
struct nfs_page *req;
while (!list_empty(page_list)) {
req = nfs_list_entry(page_list->next);
nfs_list_remove_request(req);
- nfs_mark_request_commit(req, lseg, cinfo);
+ nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
if (!cinfo->dreq) {
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
@@ -1589,10 +1632,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
/* Set up the argument struct */
nfs_init_commit(data, head, NULL, cinfo);
atomic_inc(&cinfo->mds->rpcs_out);
- return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
- how, 0);
+ return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
+ data->mds_ops, how, 0);
out_bad:
- nfs_retry_commit(head, NULL, cinfo);
+ nfs_retry_commit(head, NULL, cinfo, 0);
cinfo->completion_ops->error_cleanup(NFS_I(inode));
return -ENOMEM;
}
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 7339515..683bf71 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -82,6 +82,16 @@ config NFSD_V4
If unsure, say N.
+config NFSD_PNFS
+ bool "NFSv4.1 server support for Parallel NFS (pNFS)"
+ depends on NFSD_V4
+ help
+ This option enables support for the parallel NFS features of the
+ minor version 1 of the NFSv4 protocol (RFC5661) in the kernel's NFS
+ server.
+
+ If unsure, say N.
+
config NFSD_V4_SECURITY_LABEL
bool "Provide Security Label support for NFSv4 server"
depends on NFSD_V4 && SECURITY
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index af32ef0..9a6028e 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -2,9 +2,14 @@
# Makefile for the Linux nfs server
#
+ccflags-y += -I$(src) # needed for trace events
+
obj-$(CONFIG_NFSD) += nfsd.o
-nfsd-y := nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
+# this one should be compiled first, as the tracing macros can easily blow up
+nfsd-y += trace.o
+
+nfsd-y += nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o
nfsd-$(CONFIG_NFSD_FAULT_INJECTION) += fault_inject.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
@@ -12,3 +17,4 @@ nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
nfs4acl.o nfs4callback.o nfs4recover.o
+nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o blocklayout.o blocklayoutxdr.o
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
new file mode 100644
index 0000000..cdbc78c
--- /dev/null
+++ b/fs/nfsd/blocklayout.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include <linux/exportfs.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+
+#include <linux/nfsd/debug.h>
+
+#include "blocklayoutxdr.h"
+#include "pnfs.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+
+static int
+nfsd4_block_get_device_info_simple(struct super_block *sb,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_block_deviceaddr *dev;
+ struct pnfs_block_volume *b;
+
+ dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
+ sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ gdp->gd_device = dev;
+
+ dev->nr_volumes = 1;
+ b = &dev->volumes[0];
+
+ b->type = PNFS_BLOCK_VOLUME_SIMPLE;
+ b->simple.sig_len = PNFS_BLOCK_UUID_LEN;
+ return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len,
+ &b->simple.offset);
+}
+
+static __be32
+nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ if (sb->s_bdev != sb->s_bdev->bd_contains)
+ return nfserr_inval;
+ return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp));
+}
+
+static __be32
+nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
+ struct nfsd4_layoutget *args)
+{
+ struct nfsd4_layout_seg *seg = &args->lg_seg;
+ struct super_block *sb = inode->i_sb;
+ u32 block_size = (1 << inode->i_blkbits);
+ struct pnfs_block_extent *bex;
+ struct iomap iomap;
+ u32 device_generation = 0;
+ int error;
+
+ /*
+ * We do not attempt to support I/O that is smaller than the fs
+ * block size, or that is not aligned to it.
+ */
+ if (args->lg_minlength < block_size) {
+ dprintk("pnfsd: I/O too small\n");
+ goto out_layoutunavailable;
+ }
+ if (seg->offset & (block_size - 1)) {
+ dprintk("pnfsd: I/O misaligned\n");
+ goto out_layoutunavailable;
+ }
+
+ /*
+ * Some clients barf on non-zero block numbers for NONE or INVALID
+ * layouts, so make sure to zero the whole structure.
+ */
+ error = -ENOMEM;
+ bex = kzalloc(sizeof(*bex), GFP_KERNEL);
+ if (!bex)
+ goto out_error;
+ args->lg_content = bex;
+
+ error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length,
+ &iomap, seg->iomode != IOMODE_READ,
+ &device_generation);
+ if (error) {
+ if (error == -ENXIO)
+ goto out_layoutunavailable;
+ goto out_error;
+ }
+
+ if (iomap.length < args->lg_minlength) {
+ dprintk("pnfsd: extent smaller than minlength\n");
+ goto out_layoutunavailable;
+ }
+
+ switch (iomap.type) {
+ case IOMAP_MAPPED:
+ if (seg->iomode == IOMODE_READ)
+ bex->es = PNFS_BLOCK_READ_DATA;
+ else
+ bex->es = PNFS_BLOCK_READWRITE_DATA;
+ bex->soff = (iomap.blkno << 9);
+ break;
+ case IOMAP_UNWRITTEN:
+ if (seg->iomode & IOMODE_RW) {
+ /*
+ * Special case from RFC 5663, section 2.3.1.
+ */
+ if (args->lg_minlength == 0) {
+ dprintk("pnfsd: no soup for you!\n");
+ goto out_layoutunavailable;
+ }
+
+ bex->es = PNFS_BLOCK_INVALID_DATA;
+ bex->soff = (iomap.blkno << 9);
+ break;
+ }
+ /*FALLTHRU*/
+ case IOMAP_HOLE:
+ if (seg->iomode == IOMODE_READ) {
+ bex->es = PNFS_BLOCK_NONE_DATA;
+ break;
+ }
+ /*FALLTHRU*/
+ case IOMAP_DELALLOC:
+ default:
+ WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
+ goto out_layoutunavailable;
+ }
+
+ error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
+ if (error)
+ goto out_error;
+ bex->foff = iomap.offset;
+ bex->len = iomap.length;
+
+ seg->offset = iomap.offset;
+ seg->length = iomap.length;
+
+ dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es);
+ return 0;
+
+out_error:
+ seg->length = 0;
+ return nfserrno(error);
+out_layoutunavailable:
+ seg->length = 0;
+ return nfserr_layoutunavailable;
+}
+
+static __be32
+nfsd4_block_proc_layoutcommit(struct inode *inode,
+ struct nfsd4_layoutcommit *lcp)
+{
+ loff_t new_size = lcp->lc_last_wr + 1;
+ struct iattr iattr = { .ia_valid = 0 };
+ struct iomap *iomaps;
+ int nr_iomaps;
+ int error;
+
+ nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
+ lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
+ if (nr_iomaps < 0)
+ return nfserrno(nr_iomaps);
+
+ if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
+ timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
+ lcp->lc_mtime = current_fs_time(inode->i_sb);
+ iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
+ iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
+
+ if (new_size > i_size_read(inode)) {
+ iattr.ia_valid |= ATTR_SIZE;
+ iattr.ia_size = new_size;
+ }
+
+ error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
+ nr_iomaps, &iattr);
+ kfree(iomaps);
+ return nfserrno(error);
+}
+
+const struct nfsd4_layout_ops bl_layout_ops = {
+ .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo,
+ .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo,
+ .proc_layoutget = nfsd4_block_proc_layoutget,
+ .encode_layoutget = nfsd4_block_encode_layoutget,
+ .proc_layoutcommit = nfsd4_block_proc_layoutcommit,
+};
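
One detail worth noting in the layoutget path above: iomap.blkno is expressed
in 512-byte sectors, so the storage offset handed back to the client is the
block number shifted left by 9. A trivial standalone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long blkno = 100;         /* in 512-byte sectors */
            printf("soff = %llu\n", blkno << 9);    /* prints 51200 */
            return 0;
    }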
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
new file mode 100644
index 0000000..9da89fd
--- /dev/null
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include <linux/sunrpc/svc.h>
+#include <linux/exportfs.h>
+#include <linux/nfs4.h>
+
+#include "nfsd.h"
+#include "blocklayoutxdr.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+
+__be32
+nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp)
+{
+ struct pnfs_block_extent *b = lgp->lg_content;
+ int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32);
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, sizeof(__be32) + len);
+ if (!p)
+ return nfserr_toosmall;
+
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(1); /* we always return a single extent */
+
+ p = xdr_encode_opaque_fixed(p, &b->vol_id,
+ sizeof(struct nfsd4_deviceid));
+ p = xdr_encode_hyper(p, b->foff);
+ p = xdr_encode_hyper(p, b->len);
+ p = xdr_encode_hyper(p, b->soff);
+ *p++ = cpu_to_be32(b->es);
+ return 0;
+}
+
+static int
+nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
+{
+ __be32 *p;
+ int len;
+
+ switch (b->type) {
+ case PNFS_BLOCK_VOLUME_SIMPLE:
+ len = 4 + 4 + 8 + 4 + b->simple.sig_len;
+ p = xdr_reserve_space(xdr, len);
+ if (!p)
+ return -ETOOSMALL;
+
+ *p++ = cpu_to_be32(b->type);
+ *p++ = cpu_to_be32(1); /* single signature */
+ p = xdr_encode_hyper(p, b->simple.offset);
+ p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return len;
+}
+
+__be32
+nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_block_deviceaddr *dev = gdp->gd_device;
+ int len = sizeof(__be32), ret, i;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, len + sizeof(__be32));
+ if (!p)
+ return nfserr_resource;
+
+ for (i = 0; i < dev->nr_volumes; i++) {
+ ret = nfsd4_block_encode_volume(xdr, &dev->volumes[i]);
+ if (ret < 0)
+ return nfserrno(ret);
+ len += ret;
+ }
+
+ /*
+ * Fill in the overall length and number of volumes at the beginning
+ * of the device address.
+ */
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(dev->nr_volumes);
+ return 0;
+}
+
+int
+nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+ u32 block_size)
+{
+ struct iomap *iomaps;
+ u32 nr_iomaps, expected, i;
+
+ if (len < sizeof(u32)) {
+ dprintk("%s: extent array too small: %u\n", __func__, len);
+ return -EINVAL;
+ }
+
+ nr_iomaps = be32_to_cpup(p++);
+ expected = sizeof(__be32) + nr_iomaps * NFS4_BLOCK_EXTENT_SIZE;
+ if (len != expected) {
+ dprintk("%s: extent array size mismatch: %u/%u\n",
+ __func__, len, expected);
+ return -EINVAL;
+ }
+
+ iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
+ if (!iomaps) {
+ dprintk("%s: failed to allocate extent array\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_iomaps; i++) {
+ struct pnfs_block_extent bex;
+
+ memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
+ p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
+
+ p = xdr_decode_hyper(p, &bex.foff);
+ if (bex.foff & (block_size - 1)) {
+ dprintk("%s: unaligned offset %lld\n",
+ __func__, bex.foff);
+ goto fail;
+ }
+ p = xdr_decode_hyper(p, &bex.len);
+ if (bex.len & (block_size - 1)) {
+ dprintk("%s: unaligned length %lld\n",
+ __func__, bex.len);
+ goto fail;
+ }
+ p = xdr_decode_hyper(p, &bex.soff);
+ if (bex.soff & (block_size - 1)) {
+ dprintk("%s: unaligned disk offset %lld\n",
+ __func__, bex.soff);
+ goto fail;
+ }
+ bex.es = be32_to_cpup(p++);
+ if (bex.es != PNFS_BLOCK_READWRITE_DATA) {
+ dprintk("%s: incorrect extent state %d\n",
+ __func__, bex.es);
+ goto fail;
+ }
+
+ iomaps[i].offset = bex.foff;
+ iomaps[i].length = bex.len;
+ }
+
+ *iomapp = iomaps;
+ return nr_iomaps;
+fail:
+ kfree(iomaps);
+ return -EINVAL;
+}
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
new file mode 100644
index 0000000..fdc7903
--- /dev/null
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -0,0 +1,62 @@
+#ifndef _NFSD_BLOCKLAYOUTXDR_H
+#define _NFSD_BLOCKLAYOUTXDR_H 1
+
+#include <linux/blkdev.h>
+#include "xdr4.h"
+
+struct iomap;
+struct xdr_stream;
+
+enum pnfs_block_extent_state {
+ PNFS_BLOCK_READWRITE_DATA = 0,
+ PNFS_BLOCK_READ_DATA = 1,
+ PNFS_BLOCK_INVALID_DATA = 2,
+ PNFS_BLOCK_NONE_DATA = 3,
+};
+
+struct pnfs_block_extent {
+ struct nfsd4_deviceid vol_id;
+ u64 foff;
+ u64 len;
+ u64 soff;
+ enum pnfs_block_extent_state es;
+};
+#define NFS4_BLOCK_EXTENT_SIZE 44
+
+enum pnfs_block_volume_type {
+ PNFS_BLOCK_VOLUME_SIMPLE = 0,
+ PNFS_BLOCK_VOLUME_SLICE = 1,
+ PNFS_BLOCK_VOLUME_CONCAT = 2,
+ PNFS_BLOCK_VOLUME_STRIPE = 3,
+};
+
+/*
+ * Arbitrary upper cap on the UUID length to avoid unbounded allocation.
+ * Not actually limited by the protocol.
+ */
+#define PNFS_BLOCK_UUID_LEN 128
+
+struct pnfs_block_volume {
+ enum pnfs_block_volume_type type;
+ union {
+ struct {
+ u64 offset;
+ u32 sig_len;
+ u8 sig[PNFS_BLOCK_UUID_LEN];
+ } simple;
+ };
+};
+
+struct pnfs_block_deviceaddr {
+ u32 nr_volumes;
+ struct pnfs_block_volume volumes[];
+};
+
+__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp);
+__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp);
+int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+ u32 block_size);
+
+#endif /* _NFSD_BLOCKLAYOUTXDR_H */
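
NFS4_BLOCK_EXTENT_SIZE above matches the on-the-wire size of one extent as
decoded in blocklayoutxdr.c: a 16-byte deviceid (assuming a u64 index plus u32
generation and pad, as used in nfs4layouts.c), three 8-byte hypers (foff, len,
soff), and a 4-byte extent state. A standalone sanity check of the arithmetic:

    #include <assert.h>

    int main(void)
    {
            int deviceid = 8 + 4 + 4;               /* fsid_idx + generation + pad */
            int extent = deviceid + 3 * 8 + 4;      /* + foff/len/soff + es */
            assert(extent == 44);                   /* NFS4_BLOCK_EXTENT_SIZE */
            return 0;
    }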
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 30a739d..c3e3b6e 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -20,6 +20,7 @@
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
+#include "pnfs.h"
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
@@ -545,6 +546,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_client = dom;
exp.cd = cd;
+ exp.ex_devid_map = NULL;
/* expiry */
err = -EINVAL;
@@ -621,6 +623,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
if (!gid_valid(exp.ex_anon_gid))
goto out4;
err = 0;
+
+ nfsd4_setup_layout_type(&exp);
}
expp = svc_export_lookup(&exp);
@@ -703,6 +707,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
+ new->ex_layout_type = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
}
@@ -717,6 +722,8 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
new->ex_anon_uid = item->ex_anon_uid;
new->ex_anon_gid = item->ex_anon_gid;
new->ex_fsid = item->ex_fsid;
+ new->ex_devid_map = item->ex_devid_map;
+ item->ex_devid_map = NULL;
new->ex_uuid = item->ex_uuid;
item->ex_uuid = NULL;
new->ex_fslocs.locations = item->ex_fslocs.locations;
@@ -725,6 +732,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
item->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = item->ex_fslocs.migrated;
item->ex_fslocs.migrated = 0;
+ new->ex_layout_type = item->ex_layout_type;
new->ex_nflavors = item->ex_nflavors;
for (i = 0; i < MAX_SECINFO_LIST; i++) {
new->ex_flavors[i] = item->ex_flavors[i];
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 04dc8c1..1f52bfc 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -56,6 +56,8 @@ struct svc_export {
struct nfsd4_fs_locations ex_fslocs;
uint32_t ex_nflavors;
struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
+ enum pnfs_layouttype ex_layout_type;
+ struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
};
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7cbdf1b..5827785 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -546,6 +546,102 @@ out:
return status;
}
+#ifdef CONFIG_NFSD_PNFS
+/*
+ * CB_LAYOUTRECALL4args
+ *
+ * struct layoutrecall_file4 {
+ * nfs_fh4 lor_fh;
+ * offset4 lor_offset;
+ * length4 lor_length;
+ * stateid4 lor_stateid;
+ * };
+ *
+ * union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
+ * case LAYOUTRECALL4_FILE:
+ * layoutrecall_file4 lor_layout;
+ * case LAYOUTRECALL4_FSID:
+ * fsid4 lor_fsid;
+ * case LAYOUTRECALL4_ALL:
+ * void;
+ * };
+ *
+ * struct CB_LAYOUTRECALL4args {
+ * layouttype4 clora_type;
+ * layoutiomode4 clora_iomode;
+ * bool clora_changed;
+ * layoutrecall4 clora_recall;
+ * };
+ */
+static void encode_cb_layout4args(struct xdr_stream *xdr,
+ const struct nfs4_layout_stateid *ls,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ __be32 *p;
+
+ BUG_ON(hdr->minorversion == 0);
+
+ p = xdr_reserve_space(xdr, 5 * 4);
+ *p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
+ *p++ = cpu_to_be32(ls->ls_layout_type);
+ *p++ = cpu_to_be32(IOMODE_ANY);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(RETURN_FILE);
+
+ encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);
+
+ p = xdr_reserve_space(xdr, 2 * 8);
+ p = xdr_encode_hyper(p, 0);
+ xdr_encode_hyper(p, NFS4_MAX_UINT64);
+
+ encode_stateid4(xdr, &ls->ls_recall_sid);
+
+ hdr->nops++;
+}
+
+static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfsd4_callback *cb)
+{
+ const struct nfs4_layout_stateid *ls =
+ container_of(cb, struct nfs4_layout_stateid, ls_recall);
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = 0,
+ .minorversion = cb->cb_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_layout4args(xdr, ls, &hdr);
+ encode_cb_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfsd4_callback *cb)
+{
+ struct nfs4_cb_compound_hdr hdr;
+ enum nfsstat4 nfserr;
+ int status;
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ goto out;
+ if (cb) {
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status))
+ goto out;
+ }
+ status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr);
+ if (unlikely(status))
+ goto out;
+ if (unlikely(nfserr != NFS4_OK))
+ status = nfs_cb_stat_to_errno(nfserr);
+out:
+ return status;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
/*
* RPC procedure tables
*/
@@ -563,6 +659,9 @@ out:
static struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NULL, NULL, cb_null, cb_null),
PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall),
+#ifdef CONFIG_NFSD_PNFS
+ PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout),
+#endif
};
static struct rpc_version nfs_cb_version4 = {
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
new file mode 100644
index 0000000..3c1bfa1
--- /dev/null
+++ b/fs/nfsd/nfs4layouts.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include <linux/kmod.h>
+#include <linux/file.h>
+#include <linux/jhash.h>
+#include <linux/sched.h>
+#include <linux/sunrpc/addr.h>
+
+#include "pnfs.h"
+#include "netns.h"
+#include "trace.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+struct nfs4_layout {
+ struct list_head lo_perstate;
+ struct nfs4_layout_stateid *lo_state;
+ struct nfsd4_layout_seg lo_seg;
+};
+
+static struct kmem_cache *nfs4_layout_cache;
+static struct kmem_cache *nfs4_layout_stateid_cache;
+
+static struct nfsd4_callback_ops nfsd4_cb_layout_ops;
+static const struct lock_manager_operations nfsd4_layouts_lm_ops;
+
+const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
+ [LAYOUT_BLOCK_VOLUME] = &bl_layout_ops,
+};
+
+/* pNFS device ID to export fsid mapping */
+#define DEVID_HASH_BITS 8
+#define DEVID_HASH_SIZE (1 << DEVID_HASH_BITS)
+#define DEVID_HASH_MASK (DEVID_HASH_SIZE - 1)
+static u64 nfsd_devid_seq = 1;
+static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
+static DEFINE_SPINLOCK(nfsd_devid_lock);
+
+static inline u32 devid_hashfn(u64 idx)
+{
+ return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
+}
+
+static void
+nfsd4_alloc_devid_map(const struct svc_fh *fhp)
+{
+ const struct knfsd_fh *fh = &fhp->fh_handle;
+ size_t fsid_len = key_len(fh->fh_fsid_type);
+ struct nfsd4_deviceid_map *map, *old;
+ int i;
+
+ map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
+ if (!map)
+ return;
+
+ map->fsid_type = fh->fh_fsid_type;
+ memcpy(&map->fsid, fh->fh_fsid, fsid_len);
+
+ spin_lock(&nfsd_devid_lock);
+ if (fhp->fh_export->ex_devid_map)
+ goto out_unlock;
+
+ for (i = 0; i < DEVID_HASH_SIZE; i++) {
+ list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
+ if (old->fsid_type != fh->fh_fsid_type)
+ continue;
+ if (memcmp(old->fsid, fh->fh_fsid,
+ key_len(old->fsid_type)))
+ continue;
+
+ fhp->fh_export->ex_devid_map = old;
+ goto out_unlock;
+ }
+ }
+
+ map->idx = nfsd_devid_seq++;
+ list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
+ fhp->fh_export->ex_devid_map = map;
+ map = NULL;
+
+out_unlock:
+ spin_unlock(&nfsd_devid_lock);
+ kfree(map);
+}
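nfsd4_alloc_devid_map() follows the usual allocate-outside-the-lock pattern: the GFP_KERNEL allocation may sleep, so it happens before nfsd_devid_lock is taken, the hash is re-checked under the lock, and the allocation is freed if another thread won the race. A minimal userspace analogue with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct entry { int data; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *installed;

static void install_once(void)
{
	/* allocate while no lock is held; the allocator may block */
	struct entry *new = calloc(1, sizeof(*new));

	pthread_mutex_lock(&lock);
	if (!installed) {		/* re-check under the lock */
		installed = new;
		new = NULL;		/* ownership transferred */
	}
	pthread_mutex_unlock(&lock);
	free(new);			/* no-op if we installed it */
}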
+
+struct nfsd4_deviceid_map *
+nfsd4_find_devid_map(int idx)
+{
+ struct nfsd4_deviceid_map *map, *ret = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
+ if (map->idx == idx)
+ ret = map;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+int
+nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
+ u32 device_generation)
+{
+ if (!fhp->fh_export->ex_devid_map) {
+ nfsd4_alloc_devid_map(fhp);
+ if (!fhp->fh_export->ex_devid_map)
+ return -ENOMEM;
+ }
+
+ id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
+ id->generation = device_generation;
+ id->pad = 0;
+ return 0;
+}
+
+void nfsd4_setup_layout_type(struct svc_export *exp)
+{
+ struct super_block *sb = exp->ex_path.mnt->mnt_sb;
+
+ if (exp->ex_flags & NFSEXP_NOPNFS)
+ return;
+
+ if (sb->s_export_op->get_uuid &&
+ sb->s_export_op->map_blocks &&
+ sb->s_export_op->commit_blocks)
+ exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
+}
+
+static void
+nfsd4_free_layout_stateid(struct nfs4_stid *stid)
+{
+ struct nfs4_layout_stateid *ls = layoutstateid(stid);
+ struct nfs4_client *clp = ls->ls_stid.sc_client;
+ struct nfs4_file *fp = ls->ls_stid.sc_file;
+
+ trace_layoutstate_free(&ls->ls_stid.sc_stateid);
+
+ spin_lock(&clp->cl_lock);
+ list_del_init(&ls->ls_perclnt);
+ spin_unlock(&clp->cl_lock);
+
+ spin_lock(&fp->fi_lock);
+ list_del_init(&ls->ls_perfile);
+ spin_unlock(&fp->fi_lock);
+
+ vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
+ fput(ls->ls_file);
+
+ if (ls->ls_recalled)
+ atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);
+
+ kmem_cache_free(nfs4_layout_stateid_cache, ls);
+}
+
+static int
+nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
+{
+ struct file_lock *fl;
+ int status;
+
+ fl = locks_alloc_lock();
+ if (!fl)
+ return -ENOMEM;
+ locks_init_lock(fl);
+ fl->fl_lmops = &nfsd4_layouts_lm_ops;
+ fl->fl_flags = FL_LAYOUT;
+ fl->fl_type = F_RDLCK;
+ fl->fl_end = OFFSET_MAX;
+ fl->fl_owner = ls;
+ fl->fl_pid = current->tgid;
+ fl->fl_file = ls->ls_file;
+
+ status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
+ if (status) {
+ locks_free_lock(fl);
+ return status;
+ }
+ BUG_ON(fl != NULL);
+ return 0;
+}
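A note on the BUG_ON() above, based on the lease API as of this series:

/* On success vfs_setlease() consumes the file_lock and NULLs the
 * caller's pointer; reaching this point with fl still non-NULL would
 * mean the lease was neither installed nor freed. */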
+
+static struct nfs4_layout_stateid *
+nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
+ struct nfs4_stid *parent, u32 layout_type)
+{
+ struct nfs4_client *clp = cstate->clp;
+ struct nfs4_file *fp = parent->sc_file;
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_stid *stp;
+
+ stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+ if (!stp)
+ return NULL;
+ stp->sc_free = nfsd4_free_layout_stateid;
+ get_nfs4_file(fp);
+ stp->sc_file = fp;
+
+ ls = layoutstateid(stp);
+ INIT_LIST_HEAD(&ls->ls_perclnt);
+ INIT_LIST_HEAD(&ls->ls_perfile);
+ spin_lock_init(&ls->ls_lock);
+ INIT_LIST_HEAD(&ls->ls_layouts);
+ ls->ls_layout_type = layout_type;
+ nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
+ NFSPROC4_CLNT_CB_LAYOUT);
+
+ if (parent->sc_type == NFS4_DELEG_STID)
+ ls->ls_file = get_file(fp->fi_deleg_file);
+ else
+ ls->ls_file = find_any_file(fp);
+ BUG_ON(!ls->ls_file);
+
+ if (nfsd4_layout_setlease(ls)) {
+ put_nfs4_file(fp);
+ kmem_cache_free(nfs4_layout_stateid_cache, ls);
+ return NULL;
+ }
+
+ spin_lock(&clp->cl_lock);
+ stp->sc_type = NFS4_LAYOUT_STID;
+ list_add(&ls->ls_perclnt, &clp->cl_lo_states);
+ spin_unlock(&clp->cl_lock);
+
+ spin_lock(&fp->fi_lock);
+ list_add(&ls->ls_perfile, &fp->fi_lo_states);
+ spin_unlock(&fp->fi_lock);
+
+ trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
+ return ls;
+}
+
+__be32
+nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, stateid_t *stateid,
+ bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
+{
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_stid *stid;
+ unsigned char typemask = NFS4_LAYOUT_STID;
+ __be32 status;
+
+ if (create)
+ typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);
+
+ status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
+ net_generic(SVC_NET(rqstp), nfsd_net_id));
+ if (status)
+ goto out;
+
+ if (!fh_match(&cstate->current_fh.fh_handle,
+ &stid->sc_file->fi_fhandle)) {
+ status = nfserr_bad_stateid;
+ goto out_put_stid;
+ }
+
+ if (stid->sc_type != NFS4_LAYOUT_STID) {
+ ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
+ nfs4_put_stid(stid);
+
+ status = nfserr_jukebox;
+ if (!ls)
+ goto out;
+ } else {
+ ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);
+
+ status = nfserr_bad_stateid;
+ if (stateid->si_generation > stid->sc_stateid.si_generation)
+ goto out_put_stid;
+ if (layout_type != ls->ls_layout_type)
+ goto out_put_stid;
+ }
+
+ *lsp = ls;
+ return 0;
+
+out_put_stid:
+ nfs4_put_stid(stid);
+out:
+ return status;
+}
+
+static void
+nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
+{
+ spin_lock(&ls->ls_lock);
+ if (ls->ls_recalled)
+ goto out_unlock;
+
+ ls->ls_recalled = true;
+ atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
+ if (list_empty(&ls->ls_layouts))
+ goto out_unlock;
+
+ trace_layout_recall(&ls->ls_stid.sc_stateid);
+
+ atomic_inc(&ls->ls_stid.sc_count);
+ update_stateid(&ls->ls_stid.sc_stateid);
+ memcpy(&ls->ls_recall_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
+ nfsd4_run_cb(&ls->ls_recall);
+
+out_unlock:
+ spin_unlock(&ls->ls_lock);
+}
+
+static inline u64
+layout_end(struct nfsd4_layout_seg *seg)
+{
+ u64 end = seg->offset + seg->length;
+ return end >= seg->offset ? end : NFS4_MAX_UINT64;
+}
+
+static void
+layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
+{
+ if (end == NFS4_MAX_UINT64)
+ lo->length = NFS4_MAX_UINT64;
+ else
+ lo->length = end - lo->offset;
+}
+
+static bool
+layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
+{
+ if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
+ return false;
+ if (layout_end(&lo->lo_seg) <= s->offset)
+ return false;
+ if (layout_end(s) <= lo->lo_seg.offset)
+ return false;
+ return true;
+}
+
+static bool
+layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
+{
+ if (lo->iomode != new->iomode)
+ return false;
+ if (layout_end(new) < lo->offset)
+ return false;
+ if (layout_end(lo) < new->offset)
+ return false;
+
+ lo->offset = min(lo->offset, new->offset);
+ layout_update_len(lo, max(layout_end(lo), layout_end(new)));
+ return true;
+}
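A worked example of the saturation in layout_end(), which the overlap and merge helpers above depend on:

/* offset = 0xfffffffffffff000, length = 0x2000:
 * offset + length wraps to 0x1000 < offset, so layout_end() returns
 * NFS4_MAX_UINT64, and layout_update_len() then stores NFS4_MAX_UINT64
 * as the length, preserving the "to end of file" encoding. */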
+
+static __be32
+nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
+{
+ struct nfs4_file *fp = ls->ls_stid.sc_file;
+ struct nfs4_layout_stateid *l, *n;
+ __be32 nfserr = nfs_ok;
+
+ assert_spin_locked(&fp->fi_lock);
+
+ list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
+ if (l != ls) {
+ nfsd4_recall_file_layout(l);
+ nfserr = nfserr_recallconflict;
+ }
+ }
+
+ return nfserr;
+}
+
+__be32
+nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
+{
+ struct nfsd4_layout_seg *seg = &lgp->lg_seg;
+ struct nfs4_file *fp = ls->ls_stid.sc_file;
+ struct nfs4_layout *lp, *new = NULL;
+ __be32 nfserr;
+
+ spin_lock(&fp->fi_lock);
+ nfserr = nfsd4_recall_conflict(ls);
+ if (nfserr)
+ goto out;
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
+ if (layouts_try_merge(&lp->lo_seg, seg))
+ goto done;
+ }
+ spin_unlock(&ls->ls_lock);
+ spin_unlock(&fp->fi_lock);
+
+ new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
+ if (!new)
+ return nfserr_jukebox;
+ memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
+ new->lo_state = ls;
+
+ spin_lock(&fp->fi_lock);
+ nfserr = nfsd4_recall_conflict(ls);
+ if (nfserr)
+ goto out;
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
+ if (layouts_try_merge(&lp->lo_seg, seg))
+ goto done;
+ }
+
+ atomic_inc(&ls->ls_stid.sc_count);
+ list_add_tail(&new->lo_perstate, &ls->ls_layouts);
+ new = NULL;
+done:
+ update_stateid(&ls->ls_stid.sc_stateid);
+ memcpy(&lgp->lg_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
+ spin_unlock(&ls->ls_lock);
+out:
+ spin_unlock(&fp->fi_lock);
+ if (new)
+ kmem_cache_free(nfs4_layout_cache, new);
+ return nfserr;
+}
+
+static void
+nfsd4_free_layouts(struct list_head *reaplist)
+{
+ while (!list_empty(reaplist)) {
+ struct nfs4_layout *lp = list_first_entry(reaplist,
+ struct nfs4_layout, lo_perstate);
+
+ list_del(&lp->lo_perstate);
+ nfs4_put_stid(&lp->lo_state->ls_stid);
+ kmem_cache_free(nfs4_layout_cache, lp);
+ }
+}
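nfsd4_free_layouts() is the second half of a recurring idiom in this file: entries are unlinked onto a local list while the spinlock is held and only freed after it is dropped, since the final nfs4_put_stid() may take other locks. In isolation:

	LIST_HEAD(reaplist);

	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, &reaplist);	/* unlink under the lock */
	spin_unlock(&ls->ls_lock);

	nfsd4_free_layouts(&reaplist);			/* free outside the lock */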
+
+static void
+nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
+ struct list_head *reaplist)
+{
+ struct nfsd4_layout_seg *lo = &lp->lo_seg;
+ u64 end = layout_end(lo);
+
+ if (seg->offset <= lo->offset) {
+ if (layout_end(seg) >= end) {
+ list_move_tail(&lp->lo_perstate, reaplist);
+ return;
+ }
+ end = seg->offset;
+ } else {
+ /* retain the whole layout segment on a split. */
+ if (layout_end(seg) < end) {
+ dprintk("%s: split not supported\n", __func__);
+ return;
+ }
+
+ lo->offset = layout_end(seg);
+ }
+
+ layout_update_len(lo, end);
+}
+
+__be32
+nfsd4_return_file_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_layout *lp, *n;
+ LIST_HEAD(reaplist);
+ __be32 nfserr;
+ int found = 0;
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
+ false, lrp->lr_layout_type,
+ &ls);
+ if (nfserr) {
+ trace_layout_return_lookup_fail(&lrp->lr_sid);
+ return nfserr;
+ }
+
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
+ if (layouts_overlapping(lp, &lrp->lr_seg)) {
+ nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
+ found++;
+ }
+ }
+ if (!list_empty(&ls->ls_layouts)) {
+ if (found) {
+ update_stateid(&ls->ls_stid.sc_stateid);
+ memcpy(&lrp->lr_sid, &ls->ls_stid.sc_stateid,
+ sizeof(stateid_t));
+ }
+ lrp->lrs_present = 1;
+ } else {
+ trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
+ nfs4_unhash_stid(&ls->ls_stid);
+ lrp->lrs_present = 0;
+ }
+ spin_unlock(&ls->ls_lock);
+
+ nfs4_put_stid(&ls->ls_stid);
+ nfsd4_free_layouts(&reaplist);
+ return nfs_ok;
+}
+
+__be32
+nfsd4_return_client_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct nfs4_layout_stateid *ls, *n;
+ struct nfs4_client *clp = cstate->clp;
+ struct nfs4_layout *lp, *t;
+ LIST_HEAD(reaplist);
+
+ lrp->lrs_present = 0;
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
+ if (lrp->lr_return_type == RETURN_FSID &&
+ !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
+ &cstate->current_fh.fh_handle))
+ continue;
+
+ spin_lock(&ls->ls_lock);
+ list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
+ if (lrp->lr_seg.iomode == IOMODE_ANY ||
+ lrp->lr_seg.iomode == lp->lo_seg.iomode)
+ list_move_tail(&lp->lo_perstate, &reaplist);
+ }
+ spin_unlock(&ls->ls_lock);
+ }
+ spin_unlock(&clp->cl_lock);
+
+ nfsd4_free_layouts(&reaplist);
+ return 0;
+}
+
+static void
+nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
+ struct list_head *reaplist)
+{
+ spin_lock(&ls->ls_lock);
+ list_splice_init(&ls->ls_layouts, reaplist);
+ spin_unlock(&ls->ls_lock);
+}
+
+void
+nfsd4_return_all_client_layouts(struct nfs4_client *clp)
+{
+ struct nfs4_layout_stateid *ls, *n;
+ LIST_HEAD(reaplist);
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
+ nfsd4_return_all_layouts(ls, &reaplist);
+ spin_unlock(&clp->cl_lock);
+
+ nfsd4_free_layouts(&reaplist);
+}
+
+void
+nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
+{
+ struct nfs4_layout_stateid *ls, *n;
+ LIST_HEAD(reaplist);
+
+ spin_lock(&fp->fi_lock);
+ list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
+ if (ls->ls_stid.sc_client == clp)
+ nfsd4_return_all_layouts(ls, &reaplist);
+ }
+ spin_unlock(&fp->fi_lock);
+
+ nfsd4_free_layouts(&reaplist);
+}
+
+static void
+nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
+{
+ struct nfs4_client *clp = ls->ls_stid.sc_client;
+ char addr_str[INET6_ADDRSTRLEN];
+ static char *envp[] = {
+ "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ NULL
+ };
+ char *argv[8];
+ int error;
+
+ rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
+
+ trace_layout_recall_fail(&ls->ls_stid.sc_stateid);
+
+ printk(KERN_WARNING
+ "nfsd: client %s failed to respond to layout recall. "
+ "Fencing...\n", addr_str);
+
+ argv[0] = "/sbin/nfsd-recall-failed";
+ argv[1] = addr_str;
+ argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
+ argv[3] = NULL;
+
+ error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ if (error) {
+ printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
+ addr_str, error);
+ }
+}
+
+static int
+nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
+{
+ struct nfs4_layout_stateid *ls =
+ container_of(cb, struct nfs4_layout_stateid, ls_recall);
+ LIST_HEAD(reaplist);
+
+ switch (task->tk_status) {
+ case 0:
+ return 1;
+ case -NFS4ERR_NOMATCHING_LAYOUT:
+ trace_layout_recall_done(&ls->ls_stid.sc_stateid);
+ task->tk_status = 0;
+ return 1;
+ case -NFS4ERR_DELAY:
+ /* Poll the client until it's done with the layout */
+ /* FIXME: cap the number of retries.
+ * The pNFS standard states that we should only expire
+ * the client after at least one lease time (e.g. lease-time * 2)
+ * when failing to communicate a recall.
+ */
+ rpc_delay(task, HZ/100); /* 10 milliseconds */
+ return 0;
+ default:
+ /*
+ * Unknown error or non-responding client, we'll need to fence.
+ */
+ nfsd4_cb_layout_fail(ls);
+ return -1;
+ }
+}
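The return values above follow the nfsd4_callback ->done convention, as consumed by nfsd4_cb_done() in nfs4callback.c (summary, not new code):

/* 1  -> callback finished, proceed to ->release
 * 0  -> restart the RPC (here: after the short rpc_delay())
 * -1 -> give up; the callback channel is marked down */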
+
+static void
+nfsd4_cb_layout_release(struct nfsd4_callback *cb)
+{
+ struct nfs4_layout_stateid *ls =
+ container_of(cb, struct nfs4_layout_stateid, ls_recall);
+ LIST_HEAD(reaplist);
+
+ trace_layout_recall_release(&ls->ls_stid.sc_stateid);
+
+ nfsd4_return_all_layouts(ls, &reaplist);
+ nfsd4_free_layouts(&reaplist);
+ nfs4_put_stid(&ls->ls_stid);
+}
+
+static struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
+ .done = nfsd4_cb_layout_done,
+ .release = nfsd4_cb_layout_release,
+};
+
+static bool
+nfsd4_layout_lm_break(struct file_lock *fl)
+{
+ /*
+ * We don't want the locks code to time out the lease for us;
+ * we'll remove it ourselves if a layout isn't returned
+ * in time:
+ */
+ fl->fl_break_time = 0;
+ nfsd4_recall_file_layout(fl->fl_owner);
+ return false;
+}
+
+static int
+nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
+ struct list_head *dispose)
+{
+ BUG_ON(!(arg & F_UNLCK));
+ return lease_modify(onlist, arg, dispose);
+}
+
+static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
+ .lm_break = nfsd4_layout_lm_break,
+ .lm_change = nfsd4_layout_lm_change,
+};
+
+int
+nfsd4_init_pnfs(void)
+{
+ int i;
+
+ for (i = 0; i < DEVID_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&nfsd_devid_hash[i]);
+
+ nfs4_layout_cache = kmem_cache_create("nfs4_layout",
+ sizeof(struct nfs4_layout), 0, 0, NULL);
+ if (!nfs4_layout_cache)
+ return -ENOMEM;
+
+ nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
+ sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
+ if (!nfs4_layout_stateid_cache) {
+ kmem_cache_destroy(nfs4_layout_cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+nfsd4_exit_pnfs(void)
+{
+ int i;
+
+ kmem_cache_destroy(nfs4_layout_cache);
+ kmem_cache_destroy(nfs4_layout_stateid_cache);
+
+ for (i = 0; i < DEVID_HASH_SIZE; i++) {
+ struct nfsd4_deviceid_map *map, *n;
+
+ list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
+ kfree(map);
+ }
+}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index ac71d13..d30bea8d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -43,6 +43,8 @@
#include "current_stateid.h"
#include "netns.h"
#include "acl.h"
+#include "pnfs.h"
+#include "trace.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
@@ -1178,6 +1180,259 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status == nfserr_same ? nfs_ok : status;
}
+#ifdef CONFIG_NFSD_PNFS
+static const struct nfsd4_layout_ops *
+nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
+{
+ if (!exp->ex_layout_type) {
+ dprintk("%s: export does not support pNFS\n", __func__);
+ return NULL;
+ }
+
+ if (exp->ex_layout_type != layout_type) {
+ dprintk("%s: layout type %d not supported\n",
+ __func__, layout_type);
+ return NULL;
+ }
+
+ return nfsd4_layout_ops[layout_type];
+}
+
+static __be32
+nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ const struct nfsd4_layout_ops *ops;
+ struct nfsd4_deviceid_map *map;
+ struct svc_export *exp;
+ __be32 nfserr;
+
+ dprintk("%s: layout_type %u dev_id [0x%llx:0x%x] maxcnt %u\n",
+ __func__,
+ gdp->gd_layout_type,
+ gdp->gd_devid.fsid_idx, gdp->gd_devid.generation,
+ gdp->gd_maxcount);
+
+ map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx);
+ if (!map) {
+ dprintk("%s: couldn't find device ID to export mapping!\n",
+ __func__);
+ return nfserr_noent;
+ }
+
+ exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
+ if (IS_ERR(exp)) {
+ dprintk("%s: could not find device id\n", __func__);
+ return nfserr_noent;
+ }
+
+ nfserr = nfserr_layoutunavailable;
+ ops = nfsd4_layout_verify(exp, gdp->gd_layout_type);
+ if (!ops)
+ goto out;
+
+ nfserr = nfs_ok;
+ if (gdp->gd_maxcount != 0)
+ nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp);
+
+ gdp->gd_notify_types &= ops->notify_types;
+ exp_put(exp);
+out:
+ return nfserr;
+}
+
+static __be32
+nfsd4_layoutget(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutget *lgp)
+{
+ struct svc_fh *current_fh = &cstate->current_fh;
+ const struct nfsd4_layout_ops *ops;
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+ int accmode;
+
+ switch (lgp->lg_seg.iomode) {
+ case IOMODE_READ:
+ accmode = NFSD_MAY_READ;
+ break;
+ case IOMODE_RW:
+ accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
+ break;
+ default:
+ dprintk("%s: invalid iomode %d\n",
+ __func__, lgp->lg_seg.iomode);
+ nfserr = nfserr_badiomode;
+ goto out;
+ }
+
+ nfserr = fh_verify(rqstp, current_fh, 0, accmode);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_layoutunavailable;
+ ops = nfsd4_layout_verify(current_fh->fh_export, lgp->lg_layout_type);
+ if (!ops)
+ goto out;
+
+ /*
+ * Verify minlength and range as per RFC5661:
+ * o If loga_length is less than loga_minlength,
+ * the metadata server MUST return NFS4ERR_INVAL.
+ * o If the sum of loga_offset and loga_minlength exceeds
+ * NFS4_UINT64_MAX, and loga_minlength is not
+ * NFS4_UINT64_MAX, the error NFS4ERR_INVAL MUST result.
+ * o If the sum of loga_offset and loga_length exceeds
+ * NFS4_UINT64_MAX, and loga_length is not NFS4_UINT64_MAX,
+ * the error NFS4ERR_INVAL MUST result.
+ */
+ nfserr = nfserr_inval;
+ if (lgp->lg_seg.length < lgp->lg_minlength ||
+ (lgp->lg_minlength != NFS4_MAX_UINT64 &&
+ lgp->lg_minlength > NFS4_MAX_UINT64 - lgp->lg_seg.offset) ||
+ (lgp->lg_seg.length != NFS4_MAX_UINT64 &&
+ lgp->lg_seg.length > NFS4_MAX_UINT64 - lgp->lg_seg.offset))
+ goto out;
+ if (lgp->lg_seg.length == 0)
+ goto out;
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lgp->lg_sid,
+ true, lgp->lg_layout_type, &ls);
+ if (nfserr) {
+ trace_layout_get_lookup_fail(&lgp->lg_sid);
+ goto out;
+ }
+
+ nfserr = nfserr_recallconflict;
+ if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
+ goto out_put_stid;
+
+ nfserr = ops->proc_layoutget(current_fh->fh_dentry->d_inode,
+ current_fh, lgp);
+ if (nfserr)
+ goto out_put_stid;
+
+ nfserr = nfsd4_insert_layout(lgp, ls);
+
+out_put_stid:
+ nfs4_put_stid(&ls->ls_stid);
+out:
+ return nfserr;
+}
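A worked instance of the RFC 5661 range check above:

/* offset = 2^63, minlength = 2^63 (and != NFS4_MAX_UINT64):
 * NFS4_MAX_UINT64 - offset = 2^63 - 1, so minlength exceeds it and the
 * request fails with NFS4ERR_INVAL rather than wrapping silently. */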
+
+static __be32
+nfsd4_layoutcommit(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutcommit *lcp)
+{
+ const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
+ struct svc_fh *current_fh = &cstate->current_fh;
+ const struct nfsd4_layout_ops *ops;
+ loff_t new_size = lcp->lc_last_wr + 1;
+ struct inode *inode;
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+
+ nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_layoutunavailable;
+ ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type);
+ if (!ops)
+ goto out;
+ inode = current_fh->fh_dentry->d_inode;
+
+ nfserr = nfserr_inval;
+ if (new_size <= seg->offset) {
+ dprintk("pnfsd: last write before layout segment\n");
+ goto out;
+ }
+ if (new_size > seg->offset + seg->length) {
+ dprintk("pnfsd: last write beyond layout segment\n");
+ goto out;
+ }
+ if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
+ dprintk("pnfsd: layoutcommit beyond EOF\n");
+ goto out;
+ }
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
+ false, lcp->lc_layout_type,
+ &ls);
+ if (nfserr) {
+ trace_layout_commit_lookup_fail(&lcp->lc_sid);
+ /* fixup error code as per RFC5661 */
+ if (nfserr == nfserr_bad_stateid)
+ nfserr = nfserr_badlayout;
+ goto out;
+ }
+
+ nfserr = ops->proc_layoutcommit(inode, lcp);
+ if (nfserr)
+ goto out_put_stid;
+
+ if (new_size > i_size_read(inode)) {
+ lcp->lc_size_chg = 1;
+ lcp->lc_newsize = new_size;
+ } else {
+ lcp->lc_size_chg = 0;
+ }
+
+out_put_stid:
+ nfs4_put_stid(&ls->ls_stid);
+out:
+ return nfserr;
+}
+
+static __be32
+nfsd4_layoutreturn(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct svc_fh *current_fh = &cstate->current_fh;
+ __be32 nfserr;
+
+ nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_NOP);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_layoutunavailable;
+ if (!nfsd4_layout_verify(current_fh->fh_export, lrp->lr_layout_type))
+ goto out;
+
+ switch (lrp->lr_seg.iomode) {
+ case IOMODE_READ:
+ case IOMODE_RW:
+ case IOMODE_ANY:
+ break;
+ default:
+ dprintk("%s: invalid iomode %d\n", __func__,
+ lrp->lr_seg.iomode);
+ nfserr = nfserr_inval;
+ goto out;
+ }
+
+ switch (lrp->lr_return_type) {
+ case RETURN_FILE:
+ nfserr = nfsd4_return_file_layouts(rqstp, cstate, lrp);
+ break;
+ case RETURN_FSID:
+ case RETURN_ALL:
+ nfserr = nfsd4_return_client_layouts(rqstp, cstate, lrp);
+ break;
+ default:
+ dprintk("%s: invalid return_type %d\n", __func__,
+ lrp->lr_return_type);
+ nfserr = nfserr_inval;
+ break;
+ }
+out:
+ return nfserr;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
/*
* NULL call.
*/
@@ -1679,6 +1934,36 @@ static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd
op_encode_channel_attrs_maxsz) * sizeof(__be32);
}
+#ifdef CONFIG_NFSD_PNFS
+/*
+ * At this stage we don't really know what layout driver will handle the request,
+ * so we need to define an arbitrary upper bound here.
+ */
+#define MAX_LAYOUT_SIZE 128
+static inline u32 nfsd4_layoutget_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 1 /* logr_return_on_close */ +
+ op_encode_stateid_maxsz +
+ 1 /* nr of layouts */ +
+ MAX_LAYOUT_SIZE) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 1 /* locr_newsize */ +
+ 2 /* ns_size */) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 1 /* lrs_stateid */ +
+ op_encode_stateid_maxsz) * sizeof(__be32);
+}
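Assuming op_encode_hdr_size is 2 XDR words and op_encode_stateid_maxsz is 4, as defined earlier in nfs4proc.c, the estimates above work out to:

/* layoutget:    (2 + 1 + 4 + 1 + 128) * 4 = 544 bytes
 * layoutcommit: (2 + 1 + 2) * 4           =  20 bytes
 * layoutreturn: (2 + 1 + 4) * 4           =  28 bytes */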
+#endif /* CONFIG_NFSD_PNFS */
+
static struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
.op_func = (nfsd4op_func)nfsd4_access,
@@ -1966,6 +2251,31 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
+#ifdef CONFIG_NFSD_PNFS
+ [OP_GETDEVICEINFO] = {
+ .op_func = (nfsd4op_func)nfsd4_getdeviceinfo,
+ .op_flags = ALLOWED_WITHOUT_FH,
+ .op_name = "OP_GETDEVICEINFO",
+ },
+ [OP_LAYOUTGET] = {
+ .op_func = (nfsd4op_func)nfsd4_layoutget,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_LAYOUTGET",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutget_rsize,
+ },
+ [OP_LAYOUTCOMMIT] = {
+ .op_func = (nfsd4op_func)nfsd4_layoutcommit,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_LAYOUTCOMMIT",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutcommit_rsize,
+ },
+ [OP_LAYOUTRETURN] = {
+ .op_func = (nfsd4op_func)nfsd4_layoutreturn,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_LAYOUTRETURN",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutreturn_rsize,
+ },
+#endif /* CONFIG_NFSD_PNFS */
/* NFSv4.2 operations */
[OP_ALLOCATE] = {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c06a1ba..f6b2a09 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -48,6 +48,7 @@
#include "current_stateid.h"
#include "netns.h"
+#include "pnfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -150,16 +151,6 @@ renew_client_locked(struct nfs4_client *clp)
clp->cl_time = get_seconds();
}
-static inline void
-renew_client(struct nfs4_client *clp)
-{
- struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
-
- spin_lock(&nn->client_lock);
- renew_client_locked(clp);
- spin_unlock(&nn->client_lock);
-}
-
static void put_client_renew_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
@@ -282,7 +273,7 @@ static void nfsd4_free_file_rcu(struct rcu_head *rcu)
kmem_cache_free(file_slab, fp);
}
-static inline void
+void
put_nfs4_file(struct nfs4_file *fi)
{
might_lock(&state_lock);
@@ -295,12 +286,6 @@ put_nfs4_file(struct nfs4_file *fi)
}
}
-static inline void
-get_nfs4_file(struct nfs4_file *fi)
-{
- atomic_inc(&fi->fi_ref);
-}
-
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
@@ -358,7 +343,7 @@ find_readable_file(struct nfs4_file *f)
return ret;
}
-static struct file *
+struct file *
find_any_file(struct nfs4_file *f)
{
struct file *ret;
@@ -408,14 +393,6 @@ static unsigned int file_hashval(struct knfsd_fh *fh)
return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}
-static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
-{
- return fh1->fh_size == fh2->fh_size &&
- !memcmp(fh1->fh_base.fh_pad,
- fh2->fh_base.fh_pad,
- fh1->fh_size);
-}
-
static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
@@ -494,7 +471,7 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
__nfs4_file_put_access(fp, O_RDONLY);
}
-static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
struct kmem_cache *slab)
{
struct nfs4_stid *stid;
@@ -688,17 +665,17 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
struct file *filp = NULL;
spin_lock(&fp->fi_lock);
- if (fp->fi_deleg_file && atomic_dec_and_test(&fp->fi_delegees))
+ if (fp->fi_deleg_file && --fp->fi_delegees == 0)
swap(filp, fp->fi_deleg_file);
spin_unlock(&fp->fi_lock);
if (filp) {
- vfs_setlease(filp, F_UNLCK, NULL, NULL);
+ vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
fput(filp);
}
}
-static void unhash_stid(struct nfs4_stid *s)
+void nfs4_unhash_stid(struct nfs4_stid *s)
{
s->sc_type = 0;
}
@@ -1006,7 +983,7 @@ static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
list_del_init(&stp->st_locks);
unhash_ol_stateid(stp);
- unhash_stid(&stp->st_stid);
+ nfs4_unhash_stid(&stp->st_stid);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -1518,7 +1495,12 @@ unhash_session(struct nfsd4_session *ses)
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
- if (clid->cl_boot == nn->boot_time)
+ /*
+ * We're assuming the clid was not given out from a boot
+ * precisely 2^32 (about 136 years) before this one. That seems
+ * a safe assumption:
+ */
+ if (clid->cl_boot == (u32)nn->boot_time)
return 0;
dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
clid->cl_boot, clid->cl_id, nn->boot_time);
@@ -1558,6 +1540,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
INIT_LIST_HEAD(&clp->cl_lru);
INIT_LIST_HEAD(&clp->cl_callbacks);
INIT_LIST_HEAD(&clp->cl_revoked);
+#ifdef CONFIG_NFSD_PNFS
+ INIT_LIST_HEAD(&clp->cl_lo_states);
+#endif
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
@@ -1662,6 +1647,7 @@ __destroy_client(struct nfs4_client *clp)
nfs4_get_stateowner(&oo->oo_owner);
release_openowner(oo);
}
+ nfsd4_return_all_client_layouts(clp);
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
@@ -2145,8 +2131,11 @@ nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
- /* pNFS is not supported */
+#ifdef CONFIG_NFSD_PNFS
+ new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
+#else
new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
+#endif
/* Referrals are supported, Migration is not. */
new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
@@ -3074,6 +3063,10 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
+#ifdef CONFIG_NFSD_PNFS
+ INIT_LIST_HEAD(&fp->fi_lo_states);
+ atomic_set(&fp->fi_lo_recalls, 0);
+#endif
hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
@@ -3300,7 +3293,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
struct nfs4_file *fp;
hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
- if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
+ if (fh_match(&fp->fi_fhandle, fh)) {
if (atomic_inc_not_zero(&fp->fi_ref))
return fp;
}
@@ -3308,7 +3301,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
return NULL;
}
-static struct nfs4_file *
+struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
struct nfs4_file *fp;
@@ -3477,7 +3470,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
}
static int
-nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
+nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
+ struct list_head *dispose)
{
if (arg & F_UNLCK)
return lease_modify(onlist, arg, dispose);
@@ -3855,12 +3849,12 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
/* Race breaker */
if (fp->fi_deleg_file) {
status = 0;
- atomic_inc(&fp->fi_delegees);
+ ++fp->fi_delegees;
hash_delegation_locked(dp, fp);
goto out_unlock;
}
fp->fi_deleg_file = filp;
- atomic_set(&fp->fi_delegees, 1);
+ fp->fi_delegees = 1;
hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
@@ -3901,7 +3895,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
status = -EAGAIN;
goto out_unlock;
}
- atomic_inc(&fp->fi_delegees);
+ ++fp->fi_delegees;
hash_delegation_locked(dp, fp);
status = 0;
out_unlock:
@@ -4294,7 +4288,7 @@ laundromat_main(struct work_struct *laundry)
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
- if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+ if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
return nfserr_bad_stateid;
return nfs_ok;
}
@@ -4445,7 +4439,7 @@ out_unlock:
return status;
}
-static __be32
+__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn)
@@ -4859,6 +4853,9 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ nfsd4_return_all_file_layouts(stp->st_stateowner->so_client,
+ stp->st_stid.sc_file);
+
nfsd4_close_open_stateid(stp);
/* put reference from nfs4_preprocess_seqid_op */
@@ -5556,10 +5553,11 @@ out_nfserr:
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
- struct file_lock **flpp;
+ struct file_lock *fl;
int status = false;
struct file *filp = find_any_file(fp);
struct inode *inode;
+ struct file_lock_context *flctx;
if (!filp) {
/* Any valid lock stateid should have some sort of access */
@@ -5568,15 +5566,18 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
}
inode = file_inode(filp);
+ flctx = inode->i_flctx;
- spin_lock(&inode->i_lock);
- for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
- if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
- status = true;
- break;
+ if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ if (fl->fl_owner == (fl_owner_t)lowner) {
+ status = true;
+ break;
+ }
}
+ spin_unlock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
fput(filp);
return status;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 15f7b73..df5e66c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -47,6 +47,7 @@
#include "state.h"
#include "cache.h"
#include "netns.h"
+#include "pnfs.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
@@ -234,6 +235,26 @@ static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
return ret;
}
+/*
+ * We require the high 32 bits of 'seconds' to be 0, and
+ * we ignore all 32 bits of 'nseconds'.
+ */
+static __be32
+nfsd4_decode_time(struct nfsd4_compoundargs *argp, struct timespec *tv)
+{
+ DECODE_HEAD;
+ u64 sec;
+
+ READ_BUF(12);
+ p = xdr_decode_hyper(p, &sec);
+ tv->tv_sec = sec;
+ tv->tv_nsec = be32_to_cpup(p++);
+ if (tv->tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+
+ DECODE_TAIL;
+}
+
static __be32
nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
{
@@ -267,7 +288,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
{
int expected_len, len = 0;
u32 dummy32;
- u64 sec;
char *buf;
DECODE_HEAD;
@@ -358,15 +378,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
- /* We require the high 32 bits of 'seconds' to be 0, and we ignore
- all 32 bits of 'nseconds'. */
- READ_BUF(12);
len += 12;
- p = xdr_decode_hyper(p, &sec);
- iattr->ia_atime.tv_sec = (time_t)sec;
- iattr->ia_atime.tv_nsec = be32_to_cpup(p++);
- if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
- return nfserr_inval;
+ status = nfsd4_decode_time(argp, &iattr->ia_atime);
+ if (status)
+ return status;
iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
@@ -382,15 +397,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
- /* We require the high 32 bits of 'seconds' to be 0, and we ignore
- all 32 bits of 'nseconds'. */
- READ_BUF(12);
len += 12;
- p = xdr_decode_hyper(p, &sec);
- iattr->ia_mtime.tv_sec = sec;
- iattr->ia_mtime.tv_nsec = be32_to_cpup(p++);
- if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
- return nfserr_inval;
+ status = nfsd4_decode_time(argp, &iattr->ia_mtime);
+ if (status)
+ return status;
iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
@@ -1513,6 +1523,127 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, str
DECODE_TAIL;
}
+#ifdef CONFIG_NFSD_PNFS
+static __be32
+nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
+ struct nfsd4_getdeviceinfo *gdev)
+{
+ DECODE_HEAD;
+ u32 num, i;
+
+ READ_BUF(sizeof(struct nfsd4_deviceid) + 3 * 4);
+ COPYMEM(&gdev->gd_devid, sizeof(struct nfsd4_deviceid));
+ gdev->gd_layout_type = be32_to_cpup(p++);
+ gdev->gd_maxcount = be32_to_cpup(p++);
+ num = be32_to_cpup(p++);
+ if (num) {
+ READ_BUF(4 * num);
+ gdev->gd_notify_types = be32_to_cpup(p++);
+ for (i = 1; i < num; i++) {
+ if (be32_to_cpup(p++)) {
+ status = nfserr_inval;
+ goto out;
+ }
+ }
+ }
+ DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
+ struct nfsd4_layoutget *lgp)
+{
+ DECODE_HEAD;
+
+ READ_BUF(36);
+ lgp->lg_signal = be32_to_cpup(p++);
+ lgp->lg_layout_type = be32_to_cpup(p++);
+ lgp->lg_seg.iomode = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
+ p = xdr_decode_hyper(p, &lgp->lg_seg.length);
+ p = xdr_decode_hyper(p, &lgp->lg_minlength);
+ status = nfsd4_decode_stateid(argp, &lgp->lg_sid);
+ if (status)
+ return status;
+ READ_BUF(4);
+ lgp->lg_maxcount = be32_to_cpup(p++);
+
+ DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
+ struct nfsd4_layoutcommit *lcp)
+{
+ DECODE_HEAD;
+ u32 timechange;
+
+ READ_BUF(20);
+ p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
+ p = xdr_decode_hyper(p, &lcp->lc_seg.length);
+ lcp->lc_reclaim = be32_to_cpup(p++);
+ status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
+ if (status)
+ return status;
+ READ_BUF(4);
+ lcp->lc_newoffset = be32_to_cpup(p++);
+ if (lcp->lc_newoffset) {
+ READ_BUF(8);
+ p = xdr_decode_hyper(p, &lcp->lc_last_wr);
+ } else
+ lcp->lc_last_wr = 0;
+ READ_BUF(4);
+ timechange = be32_to_cpup(p++);
+ if (timechange) {
+ status = nfsd4_decode_time(argp, &lcp->lc_mtime);
+ if (status)
+ return status;
+ } else {
+ lcp->lc_mtime.tv_nsec = UTIME_NOW;
+ }
+ READ_BUF(8);
+ lcp->lc_layout_type = be32_to_cpup(p++);
+
+ /*
+ * Save the layout update in XDR format and let the layout driver deal
+ * with it later.
+ */
+ lcp->lc_up_len = be32_to_cpup(p++);
+ if (lcp->lc_up_len > 0) {
+ READ_BUF(lcp->lc_up_len);
+ READMEM(lcp->lc_up_layout, lcp->lc_up_len);
+ }
+
+ DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
+ struct nfsd4_layoutreturn *lrp)
+{
+ DECODE_HEAD;
+
+ READ_BUF(16);
+ lrp->lr_reclaim = be32_to_cpup(p++);
+ lrp->lr_layout_type = be32_to_cpup(p++);
+ lrp->lr_seg.iomode = be32_to_cpup(p++);
+ lrp->lr_return_type = be32_to_cpup(p++);
+ if (lrp->lr_return_type == RETURN_FILE) {
+ READ_BUF(16);
+ p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
+ p = xdr_decode_hyper(p, &lrp->lr_seg.length);
+ status = nfsd4_decode_stateid(argp, &lrp->lr_sid);
+ if (status)
+ return status;
+ READ_BUF(4);
+ lrp->lrf_body_len = be32_to_cpup(p++);
+ if (lrp->lrf_body_len > 0) {
+ READ_BUF(lrp->lrf_body_len);
+ READMEM(lrp->lrf_body, lrp->lrf_body_len);
+ }
+ } else {
+ lrp->lr_seg.offset = 0;
+ lrp->lr_seg.length = NFS4_MAX_UINT64;
+ }
+
+ DECODE_TAIL;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
static __be32
nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
struct nfsd4_fallocate *fallocate)
@@ -1607,11 +1738,19 @@ static nfsd4_dec nfsd4_dec_ops[] = {
[OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+#ifdef CONFIG_NFSD_PNFS
+ [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_getdeviceinfo,
+ [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_layoutcommit,
+ [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_layoutget,
+ [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_layoutreturn,
+#else
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
+#endif
[OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
@@ -2539,6 +2678,30 @@ out_acl:
get_parent_attributes(exp, &stat);
p = xdr_encode_hyper(p, stat.ino);
}
+#ifdef CONFIG_NFSD_PNFS
+ if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
+ (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
+ if (exp->ex_layout_type) {
+ p = xdr_reserve_space(xdr, 8);
+ if (!p)
+ goto out_resource;
+ *p++ = cpu_to_be32(1);
+ *p++ = cpu_to_be32(exp->ex_layout_type);
+ } else {
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out_resource;
+ *p++ = cpu_to_be32(0);
+ }
+ }
+
+ if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out_resource;
+ *p++ = cpu_to_be32(stat.blksize);
+ }
+#endif /* CONFIG_NFSD_PNFS */
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
status = nfsd4_encode_security_label(xdr, rqstp, context,
contextlen);
@@ -2768,16 +2931,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
if (entry_bytes > cd->rd_maxcount)
goto fail;
cd->rd_maxcount -= entry_bytes;
- if (!cd->rd_dircount)
- goto fail;
/*
* RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
* let's always let through the first entry, at least:
*/
- name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
+ if (!cd->rd_dircount)
+ goto fail;
+ name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
goto fail;
cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+
cd->cookie_offset = cookie_offset;
skip_entry:
cd->common.err = nfs_ok;
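A worked example of the corrected hint accounting above:

/* For an entry named "foo" (namlen = 3):
 * name_and_cookie = 4 (XDR length word)
 *                 + 4 * XDR_QUADLEN(3) (name padded to 4 bytes)
 *                 + 8 (cookie)
 *                 = 16 bytes charged against rd_dircount */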
@@ -3814,6 +3978,156 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr;
}
+#ifdef CONFIG_NFSD_PNFS
+static __be32
+nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_getdeviceinfo *gdev)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ const struct nfsd4_layout_ops *ops =
+ nfsd4_layout_ops[gdev->gd_layout_type];
+ u32 starting_len = xdr->buf->len, needed_len;
+ __be32 *p;
+
+ dprintk("%s: err %d\n", __func__, nfserr);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_resource;
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out;
+
+ *p++ = cpu_to_be32(gdev->gd_layout_type);
+
+ /* If maxcount is 0 then just update notifications */
+ if (gdev->gd_maxcount != 0) {
+ nfserr = ops->encode_getdeviceinfo(xdr, gdev);
+ if (nfserr) {
+ /*
+ * We don't bother to burden the layout drivers with
+ * enforcing gd_maxcount, just tell the client to
+ * come back with a bigger buffer if it's not enough.
+ */
+ if (xdr->buf->len + 4 > gdev->gd_maxcount)
+ goto toosmall;
+ goto out;
+ }
+ }
+
+ nfserr = nfserr_resource;
+ if (gdev->gd_notify_types) {
+ p = xdr_reserve_space(xdr, 4 + 4);
+ if (!p)
+ goto out;
+ *p++ = cpu_to_be32(1); /* bitmap length */
+ *p++ = cpu_to_be32(gdev->gd_notify_types);
+ } else {
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out;
+ *p++ = 0;
+ }
+
+ nfserr = 0;
+out:
+ kfree(gdev->gd_device);
+ dprintk("%s: done: %d\n", __func__, be32_to_cpu(nfserr));
+ return nfserr;
+
+toosmall:
+ dprintk("%s: maxcount too small\n", __func__);
+ needed_len = xdr->buf->len + 4 /* notifications */;
+ xdr_truncate_encode(xdr, starting_len);
+ p = xdr_reserve_space(xdr, 4);
+ if (!p) {
+ nfserr = nfserr_resource;
+ } else {
+ *p++ = cpu_to_be32(needed_len);
+ nfserr = nfserr_toosmall;
+ }
+ goto out;
+}
+
+static __be32
+nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_layoutget *lgp)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ const struct nfsd4_layout_ops *ops =
+ nfsd4_layout_ops[lgp->lg_layout_type];
+ __be32 *p;
+
+ dprintk("%s: err %d\n", __func__, nfserr);
+ if (nfserr)
+ goto out;
+
+ nfserr = nfserr_resource;
+ p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
+ if (!p)
+ goto out;
+
+ *p++ = cpu_to_be32(1); /* we always set return-on-close */
+ *p++ = cpu_to_be32(lgp->lg_sid.si_generation);
+ p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
+ sizeof(stateid_opaque_t));
+
+ *p++ = cpu_to_be32(1); /* we always return a single layout */
+ p = xdr_encode_hyper(p, lgp->lg_seg.offset);
+ p = xdr_encode_hyper(p, lgp->lg_seg.length);
+ *p++ = cpu_to_be32(lgp->lg_seg.iomode);
+ *p++ = cpu_to_be32(lgp->lg_layout_type);
+
+ nfserr = ops->encode_layoutget(xdr, lgp);
+out:
+ kfree(lgp->lg_content);
+ return nfserr;
+}
+
+static __be32
+nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_layoutcommit *lcp)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ __be32 *p;
+
+ if (nfserr)
+ return nfserr;
+
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ return nfserr_resource;
+ *p++ = cpu_to_be32(lcp->lc_size_chg);
+ if (lcp->lc_size_chg) {
+ p = xdr_reserve_space(xdr, 8);
+ if (!p)
+ return nfserr_resource;
+ p = xdr_encode_hyper(p, lcp->lc_newsize);
+ }
+
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_layoutreturn *lrp)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ __be32 *p;
+
+ if (nfserr)
+ return nfserr;
+
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ return nfserr_resource;
+ *p++ = cpu_to_be32(lrp->lrs_present);
+ if (lrp->lrs_present)
+ nfsd4_encode_stateid(xdr, &lrp->lr_sid);
+ return nfs_ok;
+}
+#endif /* CONFIG_NFSD_PNFS */
+
static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_seek *seek)
@@ -3890,11 +4204,19 @@ static nfsd4_enc nfsd4_enc_ops[] = {
[OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
+#ifdef CONFIG_NFSD_PNFS
+ [OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_getdeviceinfo,
+ [OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_layoutcommit,
+ [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_layoutget,
+ [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_layoutreturn,
+#else
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop,
+#endif
[OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence,
[OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 19ace74..aa47d75 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -21,6 +21,7 @@
#include "cache.h"
#include "state.h"
#include "netns.h"
+#include "pnfs.h"
/*
* We have a single directory with several nodes in it.
@@ -1258,9 +1259,12 @@ static int __init init_nfsd(void)
retval = nfsd4_init_slabs();
if (retval)
goto out_unregister_pernet;
- retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
+ retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
+ retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
+ if (retval)
+ goto out_exit_pnfs;
nfsd_stat_init(); /* Statistics */
retval = nfsd_reply_cache_init();
if (retval)
@@ -1282,6 +1286,8 @@ out_free_lockd:
out_free_stat:
nfsd_stat_shutdown();
nfsd_fault_inject_cleanup();
+out_exit_pnfs:
+ nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
out_unregister_pernet:
@@ -1299,6 +1305,7 @@ static void __exit exit_nfsd(void)
nfsd_stat_shutdown();
nfsd_lockd_shutdown();
nfsd4_free_slabs();
+ nfsd4_exit_pnfs();
nfsd_fault_inject_cleanup();
unregister_filesystem(&nfsd_fs_type);
unregister_pernet_subsys(&nfsd_net_ops);
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 33a46a8..565c4da 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -325,15 +325,27 @@ void nfsd_lockd_shutdown(void);
#define NFSD4_SUPPORTED_ATTRS_WORD2 0
+/* 4.1 */
+#ifdef CONFIG_NFSD_PNFS
+#define PNFSD_SUPPORTED_ATTRS_WORD1 FATTR4_WORD1_FS_LAYOUT_TYPES
+#define PNFSD_SUPPORTED_ATTRS_WORD2 \
+(FATTR4_WORD2_LAYOUT_BLKSIZE | FATTR4_WORD2_LAYOUT_TYPES)
+#else
+#define PNFSD_SUPPORTED_ATTRS_WORD1 0
+#define PNFSD_SUPPORTED_ATTRS_WORD2 0
+#endif /* CONFIG_NFSD_PNFS */
+
#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
NFSD4_SUPPORTED_ATTRS_WORD0
#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
- NFSD4_SUPPORTED_ATTRS_WORD1
+ (NFSD4_SUPPORTED_ATTRS_WORD1 | PNFSD_SUPPORTED_ATTRS_WORD1)
#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
- (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
+ (NFSD4_SUPPORTED_ATTRS_WORD2 | PNFSD_SUPPORTED_ATTRS_WORD2 | \
+ FATTR4_WORD2_SUPPATTR_EXCLCREAT)
+/* 4.2 */
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL
#else
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 08236d7..84cae20 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -187,6 +187,24 @@ fh_init(struct svc_fh *fhp, int maxsize)
return fhp;
}
+static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
+{
+ if (fh1->fh_size != fh2->fh_size)
+ return false;
+ if (memcmp(fh1->fh_base.fh_pad, fh2->fh_base.fh_pad, fh1->fh_size) != 0)
+ return false;
+ return true;
+}
+
+static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
+{
+ if (fh1->fh_fsid_type != fh2->fh_fsid_type)
+ return false;
+ if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type)) != 0)
+ return false;
+ return true;
+}
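Both helpers are exercised elsewhere in this series:

/* fh_match() replaces the former static nfsd_fh_match() in nfs4state.c;
 * fh_fsid_match() backs the RETURN_FSID case in
 * nfsd4_return_client_layouts(). */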
+
#ifdef CONFIG_NFSD_V3
/*
* The wcc data stored in current_fh should be cleared
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 314f5c8..9277cc9 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -119,6 +119,7 @@ struct svc_program nfsd_program = {
static bool nfsd_supported_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1] = {
[0] = 1,
[1] = 1,
+ [2] = 1,
};
int nfsd_vers(int vers, enum vers_op change)
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
new file mode 100644
index 0000000..fedb4d6
--- /dev/null
+++ b/fs/nfsd/pnfs.h
@@ -0,0 +1,81 @@
+#ifndef _FS_NFSD_PNFS_H
+#define _FS_NFSD_PNFS_H 1
+
+#include <linux/exportfs.h>
+#include <linux/nfsd/export.h>
+
+#include "state.h"
+#include "xdr4.h"
+
+struct xdr_stream;
+
+struct nfsd4_deviceid_map {
+ struct list_head hash;
+ u64 idx;
+ int fsid_type;
+ u32 fsid[];
+};
+
+struct nfsd4_layout_ops {
+ u32 notify_types;
+
+ __be32 (*proc_getdeviceinfo)(struct super_block *sb,
+ struct nfsd4_getdeviceinfo *gdevp);
+ __be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdevp);
+
+ __be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp,
+ struct nfsd4_layoutget *lgp);
+ __be32 (*encode_layoutget)(struct xdr_stream *,
+ struct nfsd4_layoutget *lgp);
+
+ __be32 (*proc_layoutcommit)(struct inode *inode,
+ struct nfsd4_layoutcommit *lcp);
+};
+
+extern const struct nfsd4_layout_ops *nfsd4_layout_ops[];
+extern const struct nfsd4_layout_ops bl_layout_ops;
+
+__be32 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, stateid_t *stateid,
+ bool create, u32 layout_type, struct nfs4_layout_stateid **lsp);
+__be32 nfsd4_insert_layout(struct nfsd4_layoutget *lgp,
+ struct nfs4_layout_stateid *ls);
+__be32 nfsd4_return_file_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp);
+__be32 nfsd4_return_client_layouts(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_layoutreturn *lrp);
+int nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
+ u32 device_generation);
+struct nfsd4_deviceid_map *nfsd4_find_devid_map(int idx);
+
+#ifdef CONFIG_NFSD_PNFS
+void nfsd4_setup_layout_type(struct svc_export *exp);
+void nfsd4_return_all_client_layouts(struct nfs4_client *);
+void nfsd4_return_all_file_layouts(struct nfs4_client *clp,
+ struct nfs4_file *fp);
+int nfsd4_init_pnfs(void);
+void nfsd4_exit_pnfs(void);
+#else
+static inline void nfsd4_setup_layout_type(struct svc_export *exp)
+{
+}
+
+static inline void nfsd4_return_all_client_layouts(struct nfs4_client *clp)
+{
+}
+static inline void nfsd4_return_all_file_layouts(struct nfs4_client *clp,
+ struct nfs4_file *fp)
+{
+}
+static inline void nfsd4_exit_pnfs(void)
+{
+}
+static inline int nfsd4_init_pnfs(void)
+{
+ return 0;
+}
+#endif /* CONFIG_NFSD_PNFS */
+#endif /* _FS_NFSD_PNFS_H */
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 9d3be37..4f3bfeb 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -92,6 +92,7 @@ struct nfs4_stid {
/* For a deleg stateid kept around only to process free_stateid's: */
#define NFS4_REVOKED_DELEG_STID 16
#define NFS4_CLOSED_DELEG_STID 32
+#define NFS4_LAYOUT_STID 64
unsigned char sc_type;
stateid_t sc_stateid;
struct nfs4_client *sc_client;
@@ -297,6 +298,9 @@ struct nfs4_client {
struct list_head cl_delegations;
struct list_head cl_revoked; /* unacknowledged, revoked 4.1 state */
struct list_head cl_lru; /* tail queue */
+#ifdef CONFIG_NFSD_PNFS
+ struct list_head cl_lo_states; /* outstanding layout states */
+#endif
struct xdr_netobj cl_name; /* id generated by client */
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
@@ -493,9 +497,13 @@ struct nfs4_file {
atomic_t fi_access[2];
u32 fi_share_deny;
struct file *fi_deleg_file;
- atomic_t fi_delegees;
+ int fi_delegees;
struct knfsd_fh fi_fhandle;
bool fi_had_conflict;
+#ifdef CONFIG_NFSD_PNFS
+ struct list_head fi_lo_states;
+ atomic_t fi_lo_recalls;
+#endif
};
/*
@@ -528,6 +536,24 @@ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
return container_of(s, struct nfs4_ol_stateid, st_stid);
}
+struct nfs4_layout_stateid {
+ struct nfs4_stid ls_stid;
+ struct list_head ls_perclnt;
+ struct list_head ls_perfile;
+ spinlock_t ls_lock;
+ struct list_head ls_layouts;
+ u32 ls_layout_type;
+ struct file *ls_file;
+ struct nfsd4_callback ls_recall;
+ stateid_t ls_recall_sid;
+ bool ls_recalled;
+};
+
+static inline struct nfs4_layout_stateid *layoutstateid(struct nfs4_stid *s)
+{
+ return container_of(s, struct nfs4_layout_stateid, ls_stid);
+}
+
/* flags for preprocess_seqid_op() */
#define RD_STATE 0x00000010
#define WR_STATE 0x00000020
@@ -535,6 +561,7 @@ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_NULL = 0,
NFSPROC4_CLNT_CB_RECALL,
+ NFSPROC4_CLNT_CB_LAYOUT,
NFSPROC4_CLNT_CB_SEQUENCE,
};
@@ -545,6 +572,12 @@ struct nfsd_net;
extern __be32 nfs4_preprocess_stateid_op(struct net *net,
struct nfsd4_compound_state *cstate,
stateid_t *stateid, int flags, struct file **filp);
+__be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ stateid_t *stateid, unsigned char typemask,
+ struct nfs4_stid **s, struct nfsd_net *nn);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ struct kmem_cache *slab);
+void nfs4_unhash_stid(struct nfs4_stid *s);
void nfs4_put_stid(struct nfs4_stid *s);
void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *);
extern void nfs4_release_reclaim(struct nfsd_net *);
@@ -567,6 +600,14 @@ extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name,
struct nfsd_net *nn);
extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn);
+struct nfs4_file *find_file(struct knfsd_fh *fh);
+void put_nfs4_file(struct nfs4_file *fi);
+static inline void get_nfs4_file(struct nfs4_file *fi)
+{
+ atomic_inc(&fi->fi_ref);
+}
+struct file *find_any_file(struct nfs4_file *f);
+
/* grace period management */
void nfsd4_end_grace(struct nfsd_net *nn);
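The layoutstateid() helper added above is the standard embedded-struct downcast: struct nfs4_layout_stateid embeds a struct nfs4_stid, generic code passes nfs4_stid pointers around, and container_of() recovers the containing object. A self-contained user-space sketch of the idiom (illustrative types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };
struct outer { long extra; struct base b; };

int main(void)
{
	struct outer o = { .extra = 42, .b = { .id = 7 } };
	struct base *bp = &o.b;			/* what generic code sees */
	struct outer *op = container_of(bp, struct outer, b);

	printf("extra=%ld id=%d\n", op->extra, op->b.id);	/* extra=42 id=7 */
	return 0;
}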
diff --git a/fs/nfsd/trace.c b/fs/nfsd/trace.c
new file mode 100644
index 0000000..82f8907
--- /dev/null
+++ b/fs/nfsd/trace.c
@@ -0,0 +1,5 @@
+
+#include "state.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
new file mode 100644
index 0000000..c668520
--- /dev/null
+++ b/fs/nfsd/trace.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nfsd
+
+#if !defined(_NFSD_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _NFSD_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(nfsd_stateid_class,
+ TP_PROTO(stateid_t *stp),
+ TP_ARGS(stp),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, si_id)
+ __field(u32, si_generation)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ ),
+ TP_printk("client %08x:%08x stateid %08x:%08x",
+ __entry->cl_boot,
+ __entry->cl_id,
+ __entry->si_id,
+ __entry->si_generation)
+)
+
+#define DEFINE_STATEID_EVENT(name) \
+DEFINE_EVENT(nfsd_stateid_class, name, \
+ TP_PROTO(stateid_t *stp), \
+ TP_ARGS(stp))
+DEFINE_STATEID_EVENT(layoutstate_alloc);
+DEFINE_STATEID_EVENT(layoutstate_unhash);
+DEFINE_STATEID_EVENT(layoutstate_free);
+DEFINE_STATEID_EVENT(layout_get_lookup_fail);
+DEFINE_STATEID_EVENT(layout_commit_lookup_fail);
+DEFINE_STATEID_EVENT(layout_return_lookup_fail);
+DEFINE_STATEID_EVENT(layout_recall);
+DEFINE_STATEID_EVENT(layout_recall_done);
+DEFINE_STATEID_EVENT(layout_recall_fail);
+DEFINE_STATEID_EVENT(layout_recall_release);
+
+#endif /* _NFSD_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
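For context: trace.c defines CREATE_TRACE_POINTS before including trace.h, so that one inclusion expands the DEFINE_STATEID_EVENT lines into real tracepoint definitions; every other includer gets only declarations. Callers then fire the generated trace_<name>() helpers; a hedged sketch (the actual call sites are elsewhere in this series):

/* Sketch only: firing one of the stateid events declared above. */
#include "trace.h"

static void example_fire(stateid_t *sid)
{
	trace_layoutstate_alloc(sid);	/* no-op unless enabled at runtime */
}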
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 90a5925..0bda93e 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -428,6 +428,61 @@ struct nfsd4_reclaim_complete {
u32 rca_one_fs;
};
+struct nfsd4_deviceid {
+ u64 fsid_idx;
+ u32 generation;
+ u32 pad;
+};
+
+struct nfsd4_layout_seg {
+ u32 iomode;
+ u64 offset;
+ u64 length;
+};
+
+struct nfsd4_getdeviceinfo {
+ struct nfsd4_deviceid gd_devid; /* request */
+ u32 gd_layout_type; /* request */
+ u32 gd_maxcount; /* request */
+ u32 gd_notify_types;/* request - response */
+ void *gd_device; /* response */
+};
+
+struct nfsd4_layoutget {
+ u64 lg_minlength; /* request */
+ u32 lg_signal; /* request */
+ u32 lg_layout_type; /* request */
+ u32 lg_maxcount; /* request */
+ stateid_t lg_sid; /* request/response */
+ struct nfsd4_layout_seg lg_seg; /* request/response */
+ void *lg_content; /* response */
+};
+
+struct nfsd4_layoutcommit {
+ stateid_t lc_sid; /* request */
+ struct nfsd4_layout_seg lc_seg; /* request */
+ u32 lc_reclaim; /* request */
+ u32 lc_newoffset; /* request */
+ u64 lc_last_wr; /* request */
+ struct timespec lc_mtime; /* request */
+ u32 lc_layout_type; /* request */
+ u32 lc_up_len; /* layout length */
+ void *lc_up_layout; /* decoded by callback */
+ u32 lc_size_chg; /* boolean for response */
+ u64 lc_newsize; /* response */
+};
+
+struct nfsd4_layoutreturn {
+ u32 lr_return_type; /* request */
+ u32 lr_layout_type; /* request */
+ struct nfsd4_layout_seg lr_seg; /* request */
+ u32 lr_reclaim; /* request */
+ u32 lrf_body_len; /* request */
+ void *lrf_body; /* request */
+ stateid_t lr_sid; /* request/response */
+ u32 lrs_present; /* response */
+};
+
struct nfsd4_fallocate {
/* request */
stateid_t falloc_stateid;
@@ -491,6 +546,10 @@ struct nfsd4_op {
struct nfsd4_reclaim_complete reclaim_complete;
struct nfsd4_test_stateid test_stateid;
struct nfsd4_free_stateid free_stateid;
+ struct nfsd4_getdeviceinfo getdeviceinfo;
+ struct nfsd4_layoutget layoutget;
+ struct nfsd4_layoutcommit layoutcommit;
+ struct nfsd4_layoutreturn layoutreturn;
/* NFSv4.2 */
struct nfsd4_fallocate allocate;
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index c5c55df..c47f6fd 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -21,3 +21,10 @@
#define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \
cb_sequence_dec_sz + \
op_dec_sz)
+#define NFS4_enc_cb_layout_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
+ 1 + 3 + \
+ enc_nfs4_fh_sz + 4)
+#define NFS4_dec_cb_layout_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + \
+ op_dec_sz)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 3a03e0a..a8c728ac 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -128,7 +128,6 @@ static const struct vm_operations_struct nilfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = nilfs_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 91093cd..3857040 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -141,7 +141,6 @@ enum {
* @ti_save: Backup of journal_info field of task_struct
* @ti_flags: Flags
* @ti_count: Nest level
- * @ti_garbage: List of inode to be put when releasing semaphore
*/
struct nilfs_transaction_info {
u32 ti_magic;
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
one of other filesystems has a bug. */
unsigned short ti_flags;
unsigned short ti_count;
- struct list_head ti_garbage;
};
/* ti_magic */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 7ef18fc..469086b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
ti->ti_count = 0;
ti->ti_save = cur_ti;
ti->ti_magic = NILFS_TI_MAGIC;
- INIT_LIST_HEAD(&ti->ti_garbage);
current->journal_info = ti;
for (;;) {
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
up_write(&nilfs->ns_segctor_sem);
current->journal_info = ti->ti_save;
- if (!list_empty(&ti->ti_garbage))
- nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
}
}
+static void nilfs_iput_work_func(struct work_struct *work)
+{
+ struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
+ sc_iput_work);
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
+}
+
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
struct nilfs_root *root)
{
@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct nilfs_transaction_info *ti = current->journal_info;
struct nilfs_inode_info *ii, *n;
+ int defer_iput = false;
spin_lock(&nilfs->ns_inode_lock);
list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
clear_bit(NILFS_I_BUSY, &ii->i_state);
brelse(ii->i_bh);
ii->i_bh = NULL;
- list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+ list_del_init(&ii->i_dirty);
+ if (!ii->vfs_inode.i_nlink) {
+ /*
+ * Defer calling iput() to avoid a deadlock
+ * over I_SYNC flag for inodes with i_nlink == 0
+ */
+ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+ defer_iput = true;
+ } else {
+ spin_unlock(&nilfs->ns_inode_lock);
+ iput(&ii->vfs_inode);
+ spin_lock(&nilfs->ns_inode_lock);
+ }
}
spin_unlock(&nilfs->ns_inode_lock);
+
+ if (defer_iput)
+ schedule_work(&sci->sc_iput_work);
}
/*
@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
INIT_LIST_HEAD(&sci->sc_segbufs);
INIT_LIST_HEAD(&sci->sc_write_logs);
INIT_LIST_HEAD(&sci->sc_gc_inodes);
+ INIT_LIST_HEAD(&sci->sc_iput_queue);
+ INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
init_timer(&sci->sc_timer);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_transaction_unlock(sci->sc_super);
+ flush_work(&sci->sc_iput_work);
+
} while (ret && retrycount-- > 0);
}
@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
+ if (flush_work(&sci->sc_iput_work))
+ flag = true;
+
if (flag || !nilfs_segctor_confirm(sci))
nilfs_segctor_write_out(sci);
@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
}
+ if (!list_empty(&sci->sc_iput_queue)) {
+ nilfs_warning(sci->sc_super, __func__,
+ "iput queue is not empty\n");
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+ }
+
WARN_ON(!list_empty(&sci->sc_segbufs));
WARN_ON(!list_empty(&sci->sc_write_logs));
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 38a1d00..a48d6de 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/workqueue.h>
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
* @sc_nblk_inc: Block count of current generation
* @sc_dirty_files: List of files to be written
* @sc_gc_inodes: List of GC inodes having blocks to be written
+ * @sc_iput_queue: list of inodes for which iput should be done
+ * @sc_iput_work: work struct to defer iput call
* @sc_freesegs: array of segment numbers to be freed
* @sc_nfreesegs: number of segments on @sc_freesegs
* @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -135,6 +138,8 @@ struct nilfs_sc_info {
struct list_head sc_dirty_files;
struct list_head sc_gc_inodes;
+ struct list_head sc_iput_queue;
+ struct work_struct sc_iput_work;
__u64 *sc_freesegs;
size_t sc_nfreesegs;
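The shape of the nilfs fix above: calling iput() on an unlinked inode from the segment constructor can deadlock on the I_SYNC flag, so such inodes are parked on sc_iput_queue and released later from workqueue context, with flush_work() as the synchronization point at write-out and teardown. A generic kernel-style sketch of the defer-to-work idiom (illustrative names):

/* Sketch: deferring cleanup to process context via a work item. */
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

struct defer_ctx {
	spinlock_t lock;
	struct list_head queue;		/* items to dispose of later */
	struct work_struct work;
};

static void defer_work_fn(struct work_struct *work)
{
	struct defer_ctx *c = container_of(work, struct defer_ctx, work);
	/* drain c->queue here, outside the producer's locks */
}

static void defer_ctx_init(struct defer_ctx *c)
{
	spin_lock_init(&c->lock);
	INIT_LIST_HEAD(&c->queue);
	INIT_WORK(&c->work, defer_work_fn);
}
/* Producer: enqueue under c->lock, then schedule_work(&c->work).
 * Teardown: flush_work(&c->work) before freeing the context. */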
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629e..2a24249 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,5 +1,6 @@
config FSNOTIFY
def_bool n
+ select SRCU
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 30d3add..51ceb81 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -140,7 +140,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
}
if (S_ISDIR(path->dentry->d_inode->i_mode) &&
- (marks_ignored_mask & FS_ISDIR))
+ !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
return false;
if (event_mask & marks_mask & ~marks_ignored_mask)
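The rewritten check above suppresses a directory event unless FS_ISDIR is both requested by the mark and not ignored; the old code looked only at the ignored mask. A tiny user-space illustration of the predicate (FS_ISDIR's value taken from fsnotify_backend.h; the sample masks are made up):

#include <stdio.h>

#define FS_ISDIR 0x40000000u	/* event occurred against a directory */

/* Non-zero if a directory event survives this mark's filtering. */
static int dir_event_allowed(unsigned int marks_mask,
			     unsigned int marks_ignored_mask)
{
	return (marks_mask & FS_ISDIR & ~marks_ignored_mask) != 0;
}

int main(void)
{
	printf("%d\n", dir_event_allowed(FS_ISDIR | 0x1u, 0));	/* 1: allowed */
	printf("%d\n", dir_event_allowed(0x1u, 0));		/* 0: suppressed */
	return 0;
}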
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index bff8567..cf27550 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -487,20 +487,27 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
unsigned int flags,
int *destroy)
{
- __u32 oldmask;
+ __u32 oldmask = 0;
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ __u32 tmask = fsn_mark->mask & ~mask;
+
+ if (flags & FAN_MARK_ONDIR)
+ tmask &= ~FAN_ONDIR;
+
oldmask = fsn_mark->mask;
- fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
+ fsnotify_set_mark_mask_locked(fsn_mark, tmask);
} else {
- oldmask = fsn_mark->ignored_mask;
- fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
+ __u32 tmask = fsn_mark->ignored_mask & ~mask;
+ if (flags & FAN_MARK_ONDIR)
+ tmask &= ~FAN_ONDIR;
+
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
}
+ *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
spin_unlock(&fsn_mark->lock);
- *destroy = !(oldmask & ~mask);
-
return mask & oldmask;
}
@@ -569,20 +576,22 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ __u32 tmask = fsn_mark->mask | mask;
+
+ if (flags & FAN_MARK_ONDIR)
+ tmask |= FAN_ONDIR;
+
oldmask = fsn_mark->mask;
- fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
+ fsnotify_set_mark_mask_locked(fsn_mark, tmask);
} else {
__u32 tmask = fsn_mark->ignored_mask | mask;
+ if (flags & FAN_MARK_ONDIR)
+ tmask |= FAN_ONDIR;
+
fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
}
-
- if (!(flags & FAN_MARK_ONDIR)) {
- __u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
- fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
- }
-
spin_unlock(&fsn_mark->lock);
return mask & ~oldmask;
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 7e8282d..c58a1bc 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -245,16 +245,14 @@ int ocfs2_set_acl(handle_t *handle,
ret = posix_acl_equiv_mode(acl, &mode);
if (ret < 0)
return ret;
- else {
- if (ret == 0)
- acl = NULL;
- ret = ocfs2_acl_set_mode(inode, di_bh,
- handle, mode);
- if (ret)
- return ret;
+ if (ret == 0)
+ acl = NULL;
- }
+ ret = ocfs2_acl_set_mode(inode, di_bh,
+ handle, mode);
+ if (ret)
+ return ret;
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index fcae9ef..044158b 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6873,7 +6873,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
- goto out_unlock;
+ goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
@@ -6931,7 +6931,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
if (ret) {
mlog_errno(ret);
need_free = 1;
- goto out_commit;
+ goto out_unlock;
}
page_end = PAGE_CACHE_SIZE;
@@ -6964,12 +6964,16 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
if (ret) {
mlog_errno(ret);
need_free = 1;
- goto out_commit;
+ goto out_unlock;
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
}
+out_unlock:
+ if (pages)
+ ocfs2_unlock_and_free_pages(pages, num_pages);
+
out_commit:
if (ret < 0 && did_quota)
dquot_free_space_nodirty(inode,
@@ -6989,15 +6993,11 @@ out_commit:
ocfs2_commit_trans(osb, handle);
-out_unlock:
+out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
-
-out:
- if (pages) {
- ocfs2_unlock_and_free_pages(pages, num_pages);
+ if (pages)
kfree(pages);
- }
return ret;
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 2e355e0..56c403a 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1016,7 +1016,8 @@ void o2net_fill_node_map(unsigned long *map, unsigned bytes)
memset(map, 0, bytes);
for (node = 0; node < O2NM_MAX_NODES; ++node) {
- o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
+ if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
+ continue;
if (!ret) {
set_bit(node, map);
sc_put(sc);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index dc024367..b95e7df 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -107,12 +107,12 @@ struct o2net_node {
struct list_head nn_status_list;
/* connects are attempted from when heartbeat comes up until either hb
- * goes down, the node is unconfigured, no connect attempts succeed
- * before O2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
- * is queued from set_nn_state both from hb up and from itself if a
- * connect attempt fails and so can be self-arming. shutdown is
- * careful to first mark the nn such that no connects will be attempted
- * before canceling delayed connect work and flushing the queue. */
+ * goes down, the node is unconfigured, or a connect succeeds.
+ * connect_work is queued from set_nn_state both from hb up and from
+ * itself if a connect attempt fails and so can be self-arming.
+ * shutdown is careful to first mark the nn such that no connects will
+ * be attempted before canceling delayed connect work and flushing the
+ * queue. */
struct delayed_work nn_connect_work;
unsigned long nn_last_connect_attempt;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 319e786..b08050b 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3456,10 +3456,8 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
int blocksize = dir->i_sb->s_blocksize;
status = ocfs2_read_dir_block(dir, 0, &bh, 0);
- if (status) {
- mlog_errno(status);
+ if (status)
goto bail;
- }
rec_len = OCFS2_DIR_REC_LEN(namelen);
offset = 0;
@@ -3480,10 +3478,9 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
status = ocfs2_read_dir_block(dir,
offset >> sb->s_blocksize_bits,
&bh, 0);
- if (status) {
- mlog_errno(status);
+ if (status)
goto bail;
- }
+
/* move to next block */
de = (struct ocfs2_dir_entry *) bh->b_data;
}
@@ -3513,7 +3510,6 @@ next:
de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
}
- status = 0;
bail:
brelse(bh);
if (status)
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index b46278f..fd6bbbb 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -385,8 +385,12 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
head = &res->granted;
list_for_each_entry(lock, head, list) {
- if (lock->ml.cookie == cookie)
+ /* if lock is found but unlock is pending ignore the bast */
+ if (lock->ml.cookie == cookie) {
+ if (lock->unlock_pending)
+ break;
goto do_ast;
+ }
}
mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 149eb55..8251360 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -406,7 +406,7 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
}
spin_unlock(&dlm->spinlock);
- out += snprintf(buf + out, len - out, "Total on list: %ld\n", total);
+ out += snprintf(buf + out, len - out, "Total on list: %lu\n", total);
return out;
}
@@ -464,7 +464,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
spin_unlock(&dlm->master_lock);
out += snprintf(buf + out, len - out,
- "Total: %ld, Longest: %ld\n", total, longest);
+ "Total: %lu, Longest: %lu\n", total, longest);
return out;
}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 50a59d2..7df88a6 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -674,20 +674,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
spin_unlock(&dlm->spinlock);
}
-int dlm_joined(struct dlm_ctxt *dlm)
-{
- int ret = 0;
-
- spin_lock(&dlm_domain_lock);
-
- if (dlm->dlm_state == DLM_CTXT_JOINED)
- ret = 1;
-
- spin_unlock(&dlm_domain_lock);
-
- return ret;
-}
-
int dlm_shutting_down(struct dlm_ctxt *dlm)
{
int ret = 0;
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
index 2f7f60b..fd6122a 100644
--- a/fs/ocfs2/dlm/dlmdomain.h
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -28,7 +28,6 @@
extern spinlock_t dlm_domain_lock;
extern struct list_head dlm_domains;
-int dlm_joined(struct dlm_ctxt *dlm);
int dlm_shutting_down(struct dlm_ctxt *dlm);
void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
int node_num);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index cecd875..ce12e0b 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1070,6 +1070,9 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
+ /* Can't schedule DLM_UNLOCK_FREE_LOCK
+ * - do manually */
+ dlm_lock_put(lock);
break;
}
}
@@ -2346,6 +2349,10 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
+ /* Can't schedule
+ * DLM_UNLOCK_FREE_LOCK
+ * - do manually */
+ dlm_lock_put(lock);
break;
}
}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 1c423af..11849a4 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3750,6 +3750,9 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
break;
spin_unlock(&dentry_attach_lock);
+ if (S_ISDIR(dl->dl_inode->i_mode))
+ shrink_dcache_parent(dentry);
+
mlog(0, "d_delete(%pd);\n", dentry);
/*
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index abe7d98..e0f04d5 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -569,7 +569,7 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
handle_t *handle = NULL;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
- enum ocfs2_alloc_restarted why;
+ enum ocfs2_alloc_restarted why = RESTART_NONE;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_tree et;
int did_quota = 0;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 4f502382..d10860f 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1447,7 +1447,6 @@ bail:
* requires that we call do_exit(). And it isn't exported, but
* complete_and_exit() seems to be a minimal wrapper around it. */
complete_and_exit(NULL, status);
- return status;
}
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 10d66c7..9581d190 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -173,7 +173,6 @@ out:
static const struct vm_operations_struct ocfs2_file_vm_ops = {
.fault = ocfs2_fault,
.page_mkwrite = ocfs2_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 7d6b7d0..fdbcbfe 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -279,6 +279,8 @@ enum ocfs2_mount_options
writes */
OCFS2_MOUNT_HB_NONE = 1 << 13, /* No heartbeat */
OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
+
+ OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */
};
#define OCFS2_OSB_SOFT_RO 0x0001
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 1eae330..b6d5133 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -48,6 +48,7 @@ struct ocfs2_quota_recovery {
/* In-memory structure with quota header information */
struct ocfs2_mem_dqinfo {
unsigned int dqi_type; /* Quota type this structure describes */
+ unsigned int dqi_flags; /* Flags OLQF_* */
unsigned int dqi_chunks; /* Number of chunks in local quota file */
unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */
unsigned int dqi_syncms; /* How often should we sync with other nodes */
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 10b6539..3d0b63d 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -73,12 +73,6 @@ static loff_t ol_dqblk_off(struct super_block *sb, int c, int off)
ol_dqblk_block_off(sb, c, off);
}
-/* Compute block number from given offset */
-static inline unsigned int ol_dqblk_file_block(struct super_block *sb, loff_t off)
-{
- return off >> sb->s_blocksize_bits;
-}
-
static inline unsigned int ol_dqblk_block_offset(struct super_block *sb, loff_t off)
{
return off & ((1 << sb->s_blocksize_bits) - 1);
@@ -292,7 +286,7 @@ static void olq_update_info(struct buffer_head *bh, void *private)
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
spin_lock(&dq_data_lock);
- ldinfo->dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+ ldinfo->dqi_flags = cpu_to_le32(oinfo->dqi_flags);
ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks);
ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks);
spin_unlock(&dq_data_lock);
@@ -701,8 +695,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
/* We don't need the lock and we have to acquire quota file locks
* which will later depend on this lock */
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
- info->dqi_maxblimit = 0x7fffffffffffffffLL;
- info->dqi_maxilimit = 0x7fffffffffffffffLL;
+ info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
+ info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
if (!oinfo) {
mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
@@ -737,13 +731,13 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
}
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
- info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
+ oinfo->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
oinfo->dqi_libh = bh;
/* We crashed when using local quota file? */
- if (!(info->dqi_flags & OLQF_CLEAN)) {
+ if (!(oinfo->dqi_flags & OLQF_CLEAN)) {
rec = OCFS2_SB(sb)->quota_rec;
if (!rec) {
rec = ocfs2_alloc_quota_recovery();
@@ -772,7 +766,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
}
/* Now mark quota file as used */
- info->dqi_flags &= ~OLQF_CLEAN;
+ oinfo->dqi_flags &= ~OLQF_CLEAN;
status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info);
if (status < 0) {
mlog_errno(status);
@@ -857,7 +851,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
goto out;
/* Mark local file as clean */
- info->dqi_flags |= OLQF_CLEAN;
+ oinfo->dqi_flags |= OLQF_CLEAN;
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
oinfo->dqi_libh,
olq_update_info,
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index d81f6e2..ee541f9 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2428,8 +2428,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
get_bh(prev_bh);
}
- rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
-
trace_ocfs2_calc_refcount_meta_credits_iterate(
recs_add, (unsigned long long)cpos, clusters,
(unsigned long long)le64_to_cpu(rec.r_cpos),
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 41ffd36..6a348b0 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -39,7 +39,7 @@
#define OCFS2_CHECK_RESERVATIONS
#endif
-DEFINE_SPINLOCK(resv_lock);
+static DEFINE_SPINLOCK(resv_lock);
#define OCFS2_MIN_RESV_WINDOW_BITS 8
#define OCFS2_MAX_RESV_WINDOW_BITS 1024
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8372317..87a1f76 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -191,6 +191,7 @@ enum {
Opt_coherency_full,
Opt_resv_level,
Opt_dir_resv_level,
+ Opt_journal_async_commit,
Opt_err,
};
@@ -222,6 +223,7 @@ static const match_table_t tokens = {
{Opt_coherency_full, "coherency=full"},
{Opt_resv_level, "resv_level=%u"},
{Opt_dir_resv_level, "dir_resv_level=%u"},
+ {Opt_journal_async_commit, "journal_async_commit"},
{Opt_err, NULL}
};
@@ -1000,36 +1002,6 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
}
}
-/* Handle quota on quotactl */
-static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
-{
- unsigned int feature[OCFS2_MAXQUOTAS] = {
- OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
-
- if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
- return -EINVAL;
-
- return dquot_enable(sb_dqopt(sb)->files[type], type,
- format_id, DQUOT_LIMITS_ENABLED);
-}
-
-/* Handle quota off quotactl */
-static int ocfs2_quota_off(struct super_block *sb, int type)
-{
- return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
-}
-
-static const struct quotactl_ops ocfs2_quotactl_ops = {
- .quota_on_meta = ocfs2_quota_on,
- .quota_off = ocfs2_quota_off,
- .quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
- .set_info = dquot_set_dqinfo,
- .get_dqblk = dquot_get_dqblk,
- .set_dqblk = dquot_set_dqblk,
-};
-
static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
{
struct dentry *root;
@@ -1500,6 +1472,9 @@ static int ocfs2_parse_options(struct super_block *sb,
option < OCFS2_MAX_RESV_LEVEL)
mopt->dir_resv_level = option;
break;
+ case Opt_journal_async_commit:
+ mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
+ break;
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
@@ -1606,6 +1581,9 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
if (osb->osb_dir_resv_level != osb->osb_resv_level)
seq_printf(s, ",dir_resv_level=%d", osb->osb_resv_level);
+ if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
+ seq_printf(s, ",journal_async_commit");
+
return 0;
}
@@ -2079,7 +2057,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
sb->s_op = &ocfs2_sops;
sb->s_d_op = &ocfs2_dentry_ops;
sb->s_export_op = &ocfs2_export_ops;
- sb->s_qcop = &ocfs2_quotactl_ops;
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
sb->dq_op = &ocfs2_quota_operations;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
sb->s_xattr = ocfs2_xattr_handlers;
@@ -2475,6 +2453,15 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
goto finally;
}
+ if (osb->s_mount_opt & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
+ jbd2_journal_set_features(osb->journal->j_journal,
+ JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+ else
+ jbd2_journal_clear_features(osb->journal->j_journal,
+ JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+
if (dirty) {
/* recover my local alloc if we didn't unmount cleanly. */
status = ocfs2_begin_local_alloc_recovery(osb,
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 662f8de..85b190d 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -5334,16 +5334,6 @@ out:
return ret;
}
-static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
- struct ocfs2_xattr_bucket *bucket,
- int offs)
-{
- int block_off = offs >> inode->i_sb->s_blocksize_bits;
-
- offs = offs % inode->i_sb->s_blocksize;
- return bucket_block(bucket, block_off) + offs;
-}
-
/*
* Truncate the specified xe_off entry in xattr bucket.
* bucket is indicated by header_bh and len is the new length.
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 1e3187d..7eee2d8 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -5,6 +5,7 @@
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
+#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
@@ -121,9 +122,18 @@ u64 stable_page_flags(struct page *page)
* just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
* to make sure a given page is a thp, not a non-huge compound page.
*/
- else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
- PageAnon(compound_head(page))))
- u |= 1 << KPF_THP;
+ else if (PageTransCompound(page)) {
+ struct page *head = compound_head(page);
+
+ if (PageLRU(head) || PageAnon(head))
+ u |= 1 << KPF_THP;
+ else if (is_huge_zero_page(head)) {
+ u |= 1 << KPF_ZERO_PAGE;
+ u |= 1 << KPF_THP;
+ }
+ } else if (is_zero_pfn(page_to_pfn(page)))
+ u |= 1 << KPF_ZERO_PAGE;
+
/*
* Caveats on high order pages: page->_count will only be set
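With the hunk above, user space can tell the (huge) zero page apart in /proc/kpageflags. A hedged sketch reading the flags word for one pfn; the bit numbers are copied from kernel-page-flags.h as of this series:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KPF_THP		22
#define KPF_ZERO_PAGE	24	/* introduced alongside this change */

int main(int argc, char **argv)
{
	uint64_t pfn = (argc > 1) ? strtoull(argv[1], NULL, 0) : 0;
	uint64_t flags;
	int fd = open("/proc/kpageflags", O_RDONLY);	/* needs privilege */

	if (fd < 0 || pread(fd, &flags, sizeof(flags), pfn * 8) != sizeof(flags))
		return 1;
	printf("pfn %llu: thp=%d zero=%d\n", (unsigned long long)pfn,
	       (int)(flags >> KPF_THP & 1), (int)(flags >> KPF_ZERO_PAGE & 1));
	return 0;
}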
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 246eae8..0e36c1e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -21,7 +21,7 @@
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
- unsigned long data, text, lib, swap;
+ unsigned long data, text, lib, swap, ptes, pmds;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
/*
@@ -42,6 +42,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
+ ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
+ pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
@@ -54,6 +56,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
+ "VmPMD:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
total_vm << (PAGE_SHIFT-10),
@@ -63,8 +66,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
total_rss << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
- (PTRS_PER_PTE * sizeof(pte_t) *
- atomic_long_read(&mm->nr_ptes)) >> 10,
+ ptes >> 10,
+ pmds >> 10,
swap << (PAGE_SHIFT-10));
}
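Worked numbers for the new VmPTE/VmPMD lines, assuming x86-64 (PTRS_PER_PTE = PTRS_PER_PMD = 512 with 8-byte entries, i.e. one 4 KB page per table); the table counts below are hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned long ptrs = 512, entry_size = 8;	/* x86-64 values */
	unsigned long nr_ptes = 24, nr_pmds = 3;	/* hypothetical counts */
	unsigned long ptes = ptrs * entry_size * nr_ptes;	/* bytes */
	unsigned long pmds = ptrs * entry_size * nr_pmds;

	/* same >> 10 conversion to kB that task_mem() uses */
	printf("VmPTE:\t%8lu kB\nVmPMD:\t%8lu kB\n", ptes >> 10, pmds >> 10);
	return 0;	/* prints 96 kB and 12 kB */
}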
@@ -433,7 +436,6 @@ const struct file_operations proc_tid_maps_operations = {
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
- struct vm_area_struct *vma;
unsigned long resident;
unsigned long shared_clean;
unsigned long shared_dirty;
@@ -443,7 +445,6 @@ struct mem_size_stats {
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long swap;
- unsigned long nonlinear;
u64 pss;
};
@@ -483,8 +484,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
- struct vm_area_struct *vma = mss->vma;
- pgoff_t pgoff = linear_page_index(vma, addr);
+ struct vm_area_struct *vma = walk->vma;
struct page *page = NULL;
if (pte_present(*pte)) {
@@ -496,17 +496,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
mss->swap += PAGE_SIZE;
else if (is_migration_entry(swpent))
page = migration_entry_to_page(swpent);
- } else if (pte_file(*pte)) {
- if (pte_to_pgoff(*pte) != pgoff)
- mss->nonlinear += PAGE_SIZE;
}
if (!page)
return;
-
- if (page->index != pgoff)
- mss->nonlinear += PAGE_SIZE;
-
smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}
@@ -515,7 +508,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
- struct vm_area_struct *vma = mss->vma;
+ struct vm_area_struct *vma = walk->vma;
struct page *page;
/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -536,8 +529,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct mem_size_stats *mss = walk->private;
- struct vm_area_struct *vma = mss->vma;
+ struct vm_area_struct *vma = walk->vma;
pte_t *pte;
spinlock_t *ptl;
@@ -596,7 +588,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
[ilog2(VM_ACCOUNT)] = "ac",
[ilog2(VM_NORESERVE)] = "nr",
[ilog2(VM_HUGETLB)] = "ht",
- [ilog2(VM_NONLINEAR)] = "nl",
[ilog2(VM_ARCH_1)] = "ar",
[ilog2(VM_DONTDUMP)] = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -630,10 +621,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
};
memset(&mss, 0, sizeof mss);
- mss.vma = vma;
/* mmap_sem is held in m_start */
- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+ walk_page_vma(vma, &smaps_walk);
show_map_vma(m, vma, is_pid);
@@ -668,10 +657,6 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
(vma->vm_flags & VM_LOCKED) ?
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
- if (vma->vm_flags & VM_NONLINEAR)
- seq_printf(m, "Nonlinear: %8lu kB\n",
- mss.nonlinear >> 10);
-
show_smap_vma_flags(m, vma);
m_cache_vma(m, vma);
return 0;
@@ -751,14 +736,13 @@ enum clear_refs_types {
};
struct clear_refs_private {
- struct vm_area_struct *vma;
enum clear_refs_types type;
};
+#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
{
-#ifdef CONFIG_MEM_SOFT_DIRTY
/*
* The soft-dirty tracker uses #PF-s to catch writes
* to pages, so write-protect the pte as well. See the
@@ -772,24 +756,63 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_clear_soft_dirty(ptent);
- } else if (pte_file(ptent)) {
- ptent = pte_file_clear_soft_dirty(ptent);
}
set_pte_at(vma->vm_mm, addr, pte, ptent);
-#endif
}
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmd = pmd_wrprotect(pmd);
+ pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
+
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ vma->vm_flags &= ~VM_SOFTDIRTY;
+
+ set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+}
+
+#else
+
+static inline void clear_soft_dirty(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *pte)
+{
+}
+
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+}
+#endif
+
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct clear_refs_private *cp = walk->private;
- struct vm_area_struct *vma = cp->vma;
+ struct vm_area_struct *vma = walk->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
- split_huge_page_pmd(vma, addr, pmd);
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+ if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+ clear_soft_dirty_pmd(vma, addr, pmd);
+ goto out;
+ }
+
+ page = pmd_page(*pmd);
+
+ /* Clear accessed and referenced bits. */
+ pmdp_test_and_clear_young(vma, addr, pmd);
+ ClearPageReferenced(page);
+out:
+ spin_unlock(ptl);
+ return 0;
+ }
+
if (pmd_trans_unstable(pmd))
return 0;
@@ -818,6 +841,28 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
return 0;
}
+static int clear_refs_test_walk(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct clear_refs_private *cp = walk->private;
+ struct vm_area_struct *vma = walk->vma;
+
+ if (vma->vm_flags & VM_PFNMAP)
+ return 1;
+
+ /*
+ * Writing 1 to /proc/pid/clear_refs affects all pages.
+ * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
+ * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
+ * Writing 4 to /proc/pid/clear_refs affects all pages.
+ */
+ if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
+ return 1;
+ if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
+ return 1;
+ return 0;
+}
+
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -858,6 +903,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
};
struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range,
+ .test_walk = clear_refs_test_walk,
.mm = mm,
.private = &cp,
};
@@ -877,28 +923,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}
mmu_notifier_invalidate_range_start(mm, 0, -1);
}
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- cp.vma = vma;
- if (is_vm_hugetlb_page(vma))
- continue;
- /*
- * Writing 1 to /proc/pid/clear_refs affects all pages.
- *
- * Writing 2 to /proc/pid/clear_refs only affects
- * Anonymous pages.
- *
- * Writing 3 to /proc/pid/clear_refs only affects file
- * mapped pages.
- *
- * Writing 4 to /proc/pid/clear_refs affects all pages.
- */
- if (type == CLEAR_REFS_ANON && vma->vm_file)
- continue;
- if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
- continue;
- walk_page_range(vma->vm_start, vma->vm_end,
- &clear_refs_walk);
- }
+ walk_page_range(0, ~0UL, &clear_refs_walk);
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(mm, 0, -1);
flush_tlb_mm(mm);
@@ -1066,15 +1091,13 @@ static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemap
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = walk->vma;
struct pagemapread *pm = walk->private;
spinlock_t *ptl;
- pte_t *pte;
+ pte_t *pte, *orig_pte;
int err = 0;
- /* find the first VMA at or above 'addr' */
- vma = find_vma(walk->mm, addr);
- if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
int pmd_flags2;
if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1100,51 +1123,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (pmd_trans_unstable(pmd))
return 0;
- while (1) {
- /* End of address space hole, which we mark as non-present. */
- unsigned long hole_end;
-
- if (vma)
- hole_end = min(end, vma->vm_start);
- else
- hole_end = end;
-
- for (; addr < hole_end; addr += PAGE_SIZE) {
- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-
- err = add_to_pagemap(addr, &pme, pm);
- if (err)
- return err;
- }
-
- if (!vma || vma->vm_start >= end)
- break;
- /*
- * We can't possibly be in a hugetlb VMA. In general,
- * for a mm_walk with a pmd_entry and a hugetlb_entry,
- * the pmd_entry can only be called on addresses in a
- * hugetlb if the walk starts in a non-hugetlb VMA and
- * spans a hugepage VMA. Since pagemap_read walks are
- * PMD-sized and PMD-aligned, this will never be true.
- */
- BUG_ON(is_vm_hugetlb_page(vma));
-
- /* Addresses in the VMA. */
- for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
- pagemap_entry_t pme;
- pte = pte_offset_map(pmd, addr);
- pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
- pte_unmap(pte);
- err = add_to_pagemap(addr, &pme, pm);
- if (err)
- return err;
- }
+ /*
+ * We can assume that @vma always points to a valid one and @end never
+ * goes beyond vma->vm_end.
+ */
+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ for (; addr < end; pte++, addr += PAGE_SIZE) {
+ pagemap_entry_t pme;
- if (addr == end)
+ pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
break;
-
- vma = find_vma(walk->mm, addr);
}
+ pte_unmap_unlock(orig_pte, ptl);
cond_resched();
@@ -1170,15 +1162,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = walk->vma;
int err = 0;
int flags2;
pagemap_entry_t pme;
- vma = find_vma(walk->mm, addr);
- WARN_ON_ONCE(!vma);
-
- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+ if (vma->vm_flags & VM_SOFTDIRTY)
flags2 = __PM_SOFT_DIRTY;
else
flags2 = 0;
@@ -1338,7 +1327,6 @@ const struct file_operations proc_pagemap_operations = {
#ifdef CONFIG_NUMA
struct numa_maps {
- struct vm_area_struct *vma;
unsigned long pages;
unsigned long anon;
unsigned long active;
@@ -1407,18 +1395,17 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
- struct numa_maps *md;
+ struct numa_maps *md = walk->private;
+ struct vm_area_struct *vma = walk->vma;
spinlock_t *ptl;
pte_t *orig_pte;
pte_t *pte;
- md = walk->private;
-
- if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
- page = can_gather_numa_stats(huge_pte, md->vma, addr);
+ page = can_gather_numa_stats(huge_pte, vma, addr);
if (page)
gather_stats(page, md, pte_dirty(huge_pte),
HPAGE_PMD_SIZE/PAGE_SIZE);
@@ -1430,7 +1417,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
- struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
+ struct page *page = can_gather_numa_stats(*pte, vma, addr);
if (!page)
continue;
gather_stats(page, md, pte_dirty(*pte), 1);
@@ -1440,7 +1427,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
-static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
struct numa_maps *md;
@@ -1459,7 +1446,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
}
#else
-static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
return 0;
@@ -1477,7 +1464,12 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
struct mm_struct *mm = vma->vm_mm;
- struct mm_walk walk = {};
+ struct mm_walk walk = {
+ .hugetlb_entry = gather_hugetlb_stats,
+ .pmd_entry = gather_pte_stats,
+ .private = md,
+ .mm = mm,
+ };
struct mempolicy *pol;
char buffer[64];
int nid;
@@ -1488,13 +1480,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
/* Ensure we start with an empty set of numa_maps statistics. */
memset(md, 0, sizeof(*md));
- md->vma = vma;
-
- walk.hugetlb_entry = gather_hugetbl_stats;
- walk.pmd_entry = gather_pte_stats;
- walk.private = md;
- walk.mm = mm;
-
pol = __get_vma_policy(vma, vma->vm_start);
if (pol) {
mpol_to_str(buffer, sizeof(buffer), pol);
@@ -1528,7 +1513,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
if (is_vm_hugetlb_page(vma))
seq_puts(m, " huge");
- walk_page_range(vma->vm_start, vma->vm_end, &walk);
+ /* mmap_sem is held by m_start */
+ walk_page_vma(vma, &walk);
if (!md->pages)
goto out;
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 983d951..916b8e2 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -21,6 +21,16 @@ config PSTORE_CONSOLE
When the option is enabled, pstore will log all kernel
messages, even if no oops or panic happened.
+config PSTORE_PMSG
+ bool "Log user space messages"
+ depends on PSTORE
+ help
+ When the option is enabled, pstore will export a character
+ interface /dev/pmsg0 to log user space messages. On reboot
+ data can be retrieved from /sys/fs/pstore/pmsg-ramoops-[ID].
+
+ If unsure, say N.
+
config PSTORE_FTRACE
bool "Persistent function tracer"
depends on PSTORE
diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile
index 4c9095c..e647d8e 100644
--- a/fs/pstore/Makefile
+++ b/fs/pstore/Makefile
@@ -7,5 +7,7 @@ obj-y += pstore.o
pstore-objs += inode.o platform.o
obj-$(CONFIG_PSTORE_FTRACE) += ftrace.o
+obj-$(CONFIG_PSTORE_PMSG) += pmsg.o
+
ramoops-objs += ram.o ram_core.o
obj-$(CONFIG_PSTORE_RAM) += ramoops.o
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 5041660..b32ce53 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -338,32 +338,38 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
switch (type) {
case PSTORE_TYPE_DMESG:
- sprintf(name, "dmesg-%s-%lld%s", psname, id,
- compressed ? ".enc.z" : "");
+ scnprintf(name, sizeof(name), "dmesg-%s-%lld%s",
+ psname, id, compressed ? ".enc.z" : "");
break;
case PSTORE_TYPE_CONSOLE:
- sprintf(name, "console-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "console-%s-%lld", psname, id);
break;
case PSTORE_TYPE_FTRACE:
- sprintf(name, "ftrace-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "ftrace-%s-%lld", psname, id);
break;
case PSTORE_TYPE_MCE:
- sprintf(name, "mce-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "mce-%s-%lld", psname, id);
break;
case PSTORE_TYPE_PPC_RTAS:
- sprintf(name, "rtas-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "rtas-%s-%lld", psname, id);
break;
case PSTORE_TYPE_PPC_OF:
- sprintf(name, "powerpc-ofw-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "powerpc-ofw-%s-%lld",
+ psname, id);
break;
case PSTORE_TYPE_PPC_COMMON:
- sprintf(name, "powerpc-common-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "powerpc-common-%s-%lld",
+ psname, id);
+ break;
+ case PSTORE_TYPE_PMSG:
+ scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id);
break;
case PSTORE_TYPE_UNKNOWN:
- sprintf(name, "unknown-%s-%lld", psname, id);
+ scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id);
break;
default:
- sprintf(name, "type%d-%s-%lld", type, psname, id);
+ scnprintf(name, sizeof(name), "type%d-%s-%lld",
+ type, psname, id);
break;
}
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 3b3d305..c36ba2c 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -45,6 +45,12 @@ extern void pstore_register_ftrace(void);
static inline void pstore_register_ftrace(void) {}
#endif
+#ifdef CONFIG_PSTORE_PMSG
+extern void pstore_register_pmsg(void);
+#else
+static inline void pstore_register_pmsg(void) {}
+#endif
+
extern struct pstore_info *psinfo;
extern void pstore_set_kmsg_bytes(int);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 0a9b72c..c4c9a10 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -301,7 +301,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
if (big_oops_buf) {
dst = big_oops_buf;
- hsize = sprintf(dst, "%s#%d Part%d\n", why,
+ hsize = sprintf(dst, "%s#%d Part%u\n", why,
oopscount, part);
size = big_oops_buf_sz - hsize;
@@ -321,7 +321,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
}
} else {
dst = psinfo->buf;
- hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount,
+ hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount,
part);
size = psinfo->bufsize - hsize;
dst += hsize;
@@ -447,6 +447,7 @@ int pstore_register(struct pstore_info *psi)
if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
pstore_register_console();
pstore_register_ftrace();
+ pstore_register_pmsg();
}
if (pstore_update_ms >= 0) {
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
new file mode 100644
index 0000000..feb5dd2
--- /dev/null
+++ b/fs/pstore/pmsg.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "internal.h"
+
+static DEFINE_MUTEX(pmsg_lock);
+#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
+
+static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t i, buffer_size;
+ char *buffer;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ buffer_size = count;
+ if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
+ buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
+ buffer = vmalloc(buffer_size);
+ if (!buffer)
+ return -ENOMEM;
+
+ mutex_lock(&pmsg_lock);
+ for (i = 0; i < count; ) {
+ size_t c = min(count - i, buffer_size);
+ u64 id;
+ long ret;
+
+ ret = __copy_from_user(buffer, buf + i, c);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&pmsg_lock);
+ vfree(buffer);
+ return -EFAULT;
+ }
+ psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c,
+ psinfo);
+
+ i += c;
+ }
+
+ mutex_unlock(&pmsg_lock);
+ vfree(buffer);
+ return count;
+}
+
+static const struct file_operations pmsg_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .write = write_pmsg,
+};
+
+static struct class *pmsg_class;
+static int pmsg_major;
+#define PMSG_NAME "pmsg"
+#undef pr_fmt
+#define pr_fmt(fmt) PMSG_NAME ": " fmt
+
+static char *pmsg_devnode(struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0220;
+ return NULL;
+}
+
+void pstore_register_pmsg(void)
+{
+ struct device *pmsg_device;
+
+ pmsg_major = register_chrdev(0, PMSG_NAME, &pmsg_fops);
+ if (pmsg_major < 0) {
+ pr_err("register_chrdev failed\n");
+ goto err;
+ }
+
+ pmsg_class = class_create(THIS_MODULE, PMSG_NAME);
+ if (IS_ERR(pmsg_class)) {
+ pr_err("device class file already in use\n");
+ goto err_class;
+ }
+ pmsg_class->devnode = pmsg_devnode;
+
+ pmsg_device = device_create(pmsg_class, NULL, MKDEV(pmsg_major, 0),
+ NULL, "%s%d", PMSG_NAME, 0);
+ if (IS_ERR(pmsg_device)) {
+ pr_err("failed to create device\n");
+ goto err_device;
+ }
+ return;
+
+err_device:
+ class_destroy(pmsg_class);
+err_class:
+ unregister_chrdev(pmsg_major, PMSG_NAME);
+err:
+ return;
+}
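Usage, for orientation: a process writes to /dev/pmsg0 (write-only per the 0220 devnode mode above), and after a reboot the bytes surface as /sys/fs/pstore/pmsg-ramoops-0 when ramoops backs pstore. A minimal sketch:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "shutdown reason: user requested\n";
	int fd = open("/dev/pmsg0", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, msg, strlen(msg)) < 0)	/* lands in persistent RAM */
		return 1;
	return close(fd);
}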
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 8613e5b..39d1373 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -51,6 +51,10 @@ static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");
+static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
+module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
+MODULE_PARM_DESC(pmsg_size, "size of user space message log");
+
static ulong mem_address;
module_param(mem_address, ulong, 0400);
MODULE_PARM_DESC(mem_address,
@@ -82,12 +86,14 @@ struct ramoops_context {
struct persistent_ram_zone **przs;
struct persistent_ram_zone *cprz;
struct persistent_ram_zone *fprz;
+ struct persistent_ram_zone *mprz;
phys_addr_t phys_addr;
unsigned long size;
unsigned int memtype;
size_t record_size;
size_t console_size;
size_t ftrace_size;
+ size_t pmsg_size;
int dump_oops;
struct persistent_ram_ecc_info ecc_info;
unsigned int max_dump_cnt;
@@ -96,6 +102,7 @@ struct ramoops_context {
unsigned int dump_read_cnt;
unsigned int console_read_cnt;
unsigned int ftrace_read_cnt;
+ unsigned int pmsg_read_cnt;
struct pstore_info pstore;
};
@@ -109,6 +116,7 @@ static int ramoops_pstore_open(struct pstore_info *psi)
cxt->dump_read_cnt = 0;
cxt->console_read_cnt = 0;
cxt->ftrace_read_cnt = 0;
+ cxt->pmsg_read_cnt = 0;
return 0;
}
@@ -164,6 +172,12 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec *time,
return header_length;
}
+static bool prz_ok(struct persistent_ram_zone *prz)
+{
+ return !!prz && !!(persistent_ram_old_size(prz) +
+ persistent_ram_ecc_string(prz, NULL, 0));
+}
+
static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time,
char **buf, bool *compressed,
@@ -178,13 +192,16 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
prz = ramoops_get_next_prz(cxt->przs, &cxt->dump_read_cnt,
cxt->max_dump_cnt, id, type,
PSTORE_TYPE_DMESG, 1);
- if (!prz)
+ if (!prz_ok(prz))
prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
1, id, type, PSTORE_TYPE_CONSOLE, 0);
- if (!prz)
+ if (!prz_ok(prz))
prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt,
1, id, type, PSTORE_TYPE_FTRACE, 0);
- if (!prz)
+ if (!prz_ok(prz))
+ prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
+ 1, id, type, PSTORE_TYPE_PMSG, 0);
+ if (!prz_ok(prz))
return 0;
if (!persistent_ram_old(prz))
@@ -252,6 +269,11 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
return -ENOMEM;
persistent_ram_write(cxt->fprz, buf, size);
return 0;
+ } else if (type == PSTORE_TYPE_PMSG) {
+ if (!cxt->mprz)
+ return -ENOMEM;
+ persistent_ram_write(cxt->mprz, buf, size);
+ return 0;
}
if (type != PSTORE_TYPE_DMESG)
@@ -309,6 +331,9 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
case PSTORE_TYPE_FTRACE:
prz = cxt->fprz;
break;
+ case PSTORE_TYPE_PMSG:
+ prz = cxt->mprz;
+ break;
default:
return -EINVAL;
}
@@ -435,7 +460,7 @@ static int ramoops_probe(struct platform_device *pdev)
goto fail_out;
if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
- !pdata->ftrace_size)) {
+ !pdata->ftrace_size && !pdata->pmsg_size)) {
pr_err("The memory size and the record/console size must be "
"non-zero\n");
goto fail_out;
@@ -447,6 +472,8 @@ static int ramoops_probe(struct platform_device *pdev)
pdata->console_size = rounddown_pow_of_two(pdata->console_size);
if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+ if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
+ pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);
cxt->size = pdata->mem_size;
cxt->phys_addr = pdata->mem_address;
@@ -454,12 +481,14 @@ static int ramoops_probe(struct platform_device *pdev)
cxt->record_size = pdata->record_size;
cxt->console_size = pdata->console_size;
cxt->ftrace_size = pdata->ftrace_size;
+ cxt->pmsg_size = pdata->pmsg_size;
cxt->dump_oops = pdata->dump_oops;
cxt->ecc_info = pdata->ecc_info;
paddr = cxt->phys_addr;
- dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size;
+ dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
+ - cxt->pmsg_size;
err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz);
if (err)
goto fail_out;
@@ -474,13 +503,9 @@ static int ramoops_probe(struct platform_device *pdev)
if (err)
goto fail_init_fprz;
- if (!cxt->przs && !cxt->cprz && !cxt->fprz) {
- pr_err("memory size too small, minimum is %zu\n",
- cxt->console_size + cxt->record_size +
- cxt->ftrace_size);
- err = -EINVAL;
- goto fail_cnt;
- }
+ err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0);
+ if (err)
+ goto fail_init_mprz;
cxt->pstore.data = cxt;
/*
@@ -525,7 +550,8 @@ fail_buf:
kfree(cxt->pstore.buf);
fail_clear:
cxt->pstore.bufsize = 0;
-fail_cnt:
+ kfree(cxt->mprz);
+fail_init_mprz:
kfree(cxt->fprz);
fail_init_fprz:
kfree(cxt->cprz);
@@ -583,6 +609,7 @@ static void ramoops_register_dummy(void)
dummy_data->record_size = record_size;
dummy_data->console_size = ramoops_console_size;
dummy_data->ftrace_size = ramoops_ftrace_size;
+ dummy_data->pmsg_size = ramoops_pmsg_size;
dummy_data->dump_oops = dump_oops;
/*
* For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index c51df1d..4a09975 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -5,6 +5,7 @@
config QUOTA
bool "Quota support"
select QUOTACTL
+ select SRCU
help
If you say Y here, you will be able to set per user limits for disk
usage (also called disk quotas). Currently, it works for the
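
The new "select SRCU" reflects that the dquot code now protects the per-inode dquot pointers with sleepable RCU instead of the old dqptr_sem; the read-side pattern in fs/quota/dquot.c looks roughly like this (dquot_srcu being the SRCU domain defined there):

    int idx;

    idx = srcu_read_lock(&dquot_srcu);
    /* walk inode->i_dquot[] without taking dq_data_lock */
    srcu_read_unlock(&dquot_srcu, idx);
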
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8f0acef..0ccd4ba 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1248,7 +1248,7 @@ static int ignore_hardlimit(struct dquot *dquot)
return capable(CAP_SYS_RESOURCE) &&
(info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
- !(info->dqi_flags & V1_DQF_RSQUASH));
+ !(info->dqi_flags & DQF_ROOT_SQUASH));
}
/* needs dq_data_lock */
@@ -2385,41 +2385,106 @@ out:
}
EXPORT_SYMBOL(dquot_quota_on_mount);
-static inline qsize_t qbtos(qsize_t blocks)
+static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
- return blocks << QIF_DQBLKSIZE_BITS;
+ int ret;
+ int type;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+ return -ENOSYS;
+ /* Accounting cannot be turned on while fs is mounted */
+ flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
+ if (!flags)
+ return -EINVAL;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (!(flags & qtype_enforce_flag(type)))
+ continue;
+ /* Can't enforce without accounting */
+ if (!sb_has_quota_usage_enabled(sb, type))
+ return -EINVAL;
+ ret = dquot_enable(dqopt->files[type], type,
+ dqopt->info[type].dqi_fmt_id,
+ DQUOT_LIMITS_ENABLED);
+ if (ret < 0)
+ goto out_err;
+ }
+ return 0;
+out_err:
+ /* Back out the enforcement enablement we already did */
+ for (type--; type >= 0; type--) {
+ if (flags & qtype_enforce_flag(type))
+ dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+ }
+ /* Error code translation for better compatibility with XFS */
+ if (ret == -EBUSY)
+ ret = -EEXIST;
+ return ret;
}
-static inline qsize_t stoqb(qsize_t space)
+static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
- return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+ int ret;
+ int type;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+ return -ENOSYS;
+ /*
+ * We don't support turning off accounting via quotactl. In principle
+ * the quota infrastructure can do this, but filesystems don't expect
+ * userspace to be able to do it.
+ */
+ if (flags &
+ (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
+ return -EOPNOTSUPP;
+
+ /* Filter out limits not enabled */
+ for (type = 0; type < MAXQUOTAS; type++)
+ if (!sb_has_quota_limits_enabled(sb, type))
+ flags &= ~qtype_enforce_flag(type);
+ /* Nothing left? */
+ if (!flags)
+ return -EEXIST;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (flags & qtype_enforce_flag(type)) {
+ ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+ if (ret < 0)
+ goto out_err;
+ }
+ }
+ return 0;
+out_err:
+ /* Back out the enforcement disabling we already did */
+ for (type--; type >= 0; type--) {
+ if (flags & qtype_enforce_flag(type))
+ dquot_enable(dqopt->files[type], type,
+ dqopt->info[type].dqi_fmt_id,
+ DQUOT_LIMITS_ENABLED);
+ }
+ return ret;
}
/* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
memset(di, 0, sizeof(*di));
- di->d_version = FS_DQUOT_VERSION;
- di->d_flags = dquot->dq_id.type == USRQUOTA ?
- FS_USER_QUOTA : FS_GROUP_QUOTA;
- di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
spin_lock(&dq_data_lock);
- di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
- di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+ di->d_spc_hardlimit = dm->dqb_bhardlimit;
+ di->d_spc_softlimit = dm->dqb_bsoftlimit;
di->d_ino_hardlimit = dm->dqb_ihardlimit;
di->d_ino_softlimit = dm->dqb_isoftlimit;
- di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
- di->d_icount = dm->dqb_curinodes;
- di->d_btimer = dm->dqb_btime;
- di->d_itimer = dm->dqb_itime;
+ di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+ di->d_ino_count = dm->dqb_curinodes;
+ di->d_spc_timer = dm->dqb_btime;
+ di->d_ino_timer = dm->dqb_itime;
spin_unlock(&dq_data_lock);
}
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *di)
+ struct qc_dqblk *di)
{
struct dquot *dquot;
@@ -2433,70 +2498,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
}
EXPORT_SYMBOL(dquot_get_dqblk);
-#define VFS_FS_DQ_MASK \
- (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
- FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
- FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+ (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+ QC_SPC_TIMER | QC_INO_TIMER)
/* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
- if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+ if (di->d_fieldmask & ~VFS_QC_MASK)
return -EINVAL;
- if (((di->d_fieldmask & FS_DQ_BSOFT) &&
- (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
- ((di->d_fieldmask & FS_DQ_BHARD) &&
- (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
- ((di->d_fieldmask & FS_DQ_ISOFT) &&
- (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
- ((di->d_fieldmask & FS_DQ_IHARD) &&
- (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
+ if (((di->d_fieldmask & QC_SPC_SOFT) &&
+ di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_SPC_HARD) &&
+ di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_INO_SOFT) &&
+ (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
+ ((di->d_fieldmask & QC_INO_HARD) &&
+ (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
return -ERANGE;
spin_lock(&dq_data_lock);
- if (di->d_fieldmask & FS_DQ_BCOUNT) {
- dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+ if (di->d_fieldmask & QC_SPACE) {
+ dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_BSOFT)
- dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
- if (di->d_fieldmask & FS_DQ_BHARD)
- dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
- if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+ if (di->d_fieldmask & QC_SPC_SOFT)
+ dm->dqb_bsoftlimit = di->d_spc_softlimit;
+ if (di->d_fieldmask & QC_SPC_HARD)
+ dm->dqb_bhardlimit = di->d_spc_hardlimit;
+ if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ICOUNT) {
- dm->dqb_curinodes = di->d_icount;
+ if (di->d_fieldmask & QC_INO_COUNT) {
+ dm->dqb_curinodes = di->d_ino_count;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ISOFT)
+ if (di->d_fieldmask & QC_INO_SOFT)
dm->dqb_isoftlimit = di->d_ino_softlimit;
- if (di->d_fieldmask & FS_DQ_IHARD)
+ if (di->d_fieldmask & QC_INO_HARD)
dm->dqb_ihardlimit = di->d_ino_hardlimit;
- if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+ if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_BTIMER) {
- dm->dqb_btime = di->d_btimer;
+ if (di->d_fieldmask & QC_SPC_TIMER) {
+ dm->dqb_btime = di->d_spc_timer;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ITIMER) {
- dm->dqb_itime = di->d_itimer;
+ if (di->d_fieldmask & QC_INO_TIMER) {
+ dm->dqb_itime = di->d_ino_timer;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
}
@@ -2506,7 +2571,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
dm->dqb_curspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
- } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+ } else if (!(di->d_fieldmask & QC_SPC_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
@@ -2515,7 +2580,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
dm->dqb_curinodes < dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
- } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+ } else if (!(di->d_fieldmask & QC_INO_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
@@ -2531,7 +2596,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
}
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *di)
+ struct qc_dqblk *di)
{
struct dquot *dquot;
int rc;
@@ -2582,6 +2647,14 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
goto out;
}
mi = sb_dqopt(sb)->info + type;
+ if (ii->dqi_valid & IIF_FLAGS) {
+ if (ii->dqi_flags & ~DQF_SETINFO_MASK ||
+ (ii->dqi_flags & DQF_ROOT_SQUASH &&
+ mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
spin_lock(&dq_data_lock);
if (ii->dqi_valid & IIF_BGRACE)
mi->dqi_bgrace = ii->dqi_bgrace;
@@ -2611,6 +2684,17 @@ const struct quotactl_ops dquot_quotactl_ops = {
};
EXPORT_SYMBOL(dquot_quotactl_ops);
+const struct quotactl_ops dquot_quotactl_sysfile_ops = {
+ .quota_enable = dquot_quota_enable,
+ .quota_disable = dquot_quota_disable,
+ .quota_sync = dquot_quota_sync,
+ .get_info = dquot_get_dqinfo,
+ .set_info = dquot_set_dqinfo,
+ .get_dqblk = dquot_get_dqblk,
+ .set_dqblk = dquot_set_dqblk
+};
+EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
+
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
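
With dquot_quotactl_sysfile_ops in place, a filesystem that stores quota in hidden system files can have enforcement toggled from userspace through the XFS-style quotactl commands. A minimal sketch, assuming a suitably mounted filesystem (the device path is a placeholder):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/quota.h>
    #include <linux/dqblk_xfs.h>    /* Q_XQUOTAON, FS_QUOTA_UDQ_ENFD */

    int main(void)
    {
            unsigned int flags = FS_QUOTA_UDQ_ENFD; /* enforce user limits */

            if (quotactl(QCMD(Q_XQUOTAON, USRQUOTA), "/dev/sda1", 0,
                         (caddr_t)&flags) != 0) {
                    perror("Q_XQUOTAON");
                    return 1;
            }
            return 0;
    }

do_quotactl() in fs/quota/quota.c (below) routes Q_XQUOTAON to quota_enable(), which forwards exactly these flags to ->quota_enable().
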
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 2aa4151..d14a799 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -66,18 +66,40 @@ static int quota_sync_all(int type)
return ret;
}
+unsigned int qtype_enforce_flag(int type)
+{
+ switch (type) {
+ case USRQUOTA:
+ return FS_QUOTA_UDQ_ENFD;
+ case GRPQUOTA:
+ return FS_QUOTA_GDQ_ENFD;
+ case PRJQUOTA:
+ return FS_QUOTA_PDQ_ENFD;
+ }
+ return 0;
+}
+
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
struct path *path)
{
- if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+ if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
return -ENOSYS;
- if (sb->s_qcop->quota_on_meta)
- return sb->s_qcop->quota_on_meta(sb, type, id);
+ if (sb->s_qcop->quota_enable)
+ return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
if (IS_ERR(path))
return PTR_ERR(path);
return sb->s_qcop->quota_on(sb, type, id, path);
}
+static int quota_quotaoff(struct super_block *sb, int type)
+{
+ if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
+ return -ENOSYS;
+ if (sb->s_qcop->quota_disable)
+ return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
+ return sb->s_qcop->quota_off(sb, type);
+}
+
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
__u32 fmt;
@@ -118,17 +140,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
return sb->s_qcop->set_info(sb, type, &info);
}
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+ return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+ return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
memset(dst, 0, sizeof(*dst));
- dst->dqb_bhardlimit = src->d_blk_hardlimit;
- dst->dqb_bsoftlimit = src->d_blk_softlimit;
- dst->dqb_curspace = src->d_bcount;
+ dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+ dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+ dst->dqb_curspace = src->d_space;
dst->dqb_ihardlimit = src->d_ino_hardlimit;
dst->dqb_isoftlimit = src->d_ino_softlimit;
- dst->dqb_curinodes = src->d_icount;
- dst->dqb_btime = src->d_btimer;
- dst->dqb_itime = src->d_itimer;
+ dst->dqb_curinodes = src->d_ino_count;
+ dst->dqb_btime = src->d_spc_timer;
+ dst->dqb_itime = src->d_ino_timer;
dst->dqb_valid = QIF_ALL;
}
@@ -136,7 +168,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct kqid qid;
- struct fs_disk_quota fdq;
+ struct qc_dqblk fdq;
struct if_dqblk idq;
int ret;
@@ -154,36 +186,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
return 0;
}
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
- dst->d_blk_hardlimit = src->dqb_bhardlimit;
- dst->d_blk_softlimit = src->dqb_bsoftlimit;
- dst->d_bcount = src->dqb_curspace;
+ dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+ dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+ dst->d_space = src->dqb_curspace;
dst->d_ino_hardlimit = src->dqb_ihardlimit;
dst->d_ino_softlimit = src->dqb_isoftlimit;
- dst->d_icount = src->dqb_curinodes;
- dst->d_btimer = src->dqb_btime;
- dst->d_itimer = src->dqb_itime;
+ dst->d_ino_count = src->dqb_curinodes;
+ dst->d_spc_timer = src->dqb_btime;
+ dst->d_ino_timer = src->dqb_itime;
dst->d_fieldmask = 0;
if (src->dqb_valid & QIF_BLIMITS)
- dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+ dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
if (src->dqb_valid & QIF_SPACE)
- dst->d_fieldmask |= FS_DQ_BCOUNT;
+ dst->d_fieldmask |= QC_SPACE;
if (src->dqb_valid & QIF_ILIMITS)
- dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+ dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
if (src->dqb_valid & QIF_INODES)
- dst->d_fieldmask |= FS_DQ_ICOUNT;
+ dst->d_fieldmask |= QC_INO_COUNT;
if (src->dqb_valid & QIF_BTIME)
- dst->d_fieldmask |= FS_DQ_BTIMER;
+ dst->d_fieldmask |= QC_SPC_TIMER;
if (src->dqb_valid & QIF_ITIME)
- dst->d_fieldmask |= FS_DQ_ITIMER;
+ dst->d_fieldmask |= QC_INO_TIMER;
}
static int quota_setquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
- struct fs_disk_quota fdq;
+ struct qc_dqblk fdq;
struct if_dqblk idq;
struct kqid qid;
@@ -198,15 +230,26 @@ static int quota_setquota(struct super_block *sb, int type, qid_t id,
return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}
-static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
+static int quota_enable(struct super_block *sb, void __user *addr)
+{
+ __u32 flags;
+
+ if (copy_from_user(&flags, addr, sizeof(flags)))
+ return -EFAULT;
+ if (!sb->s_qcop->quota_enable)
+ return -ENOSYS;
+ return sb->s_qcop->quota_enable(sb, flags);
+}
+
+static int quota_disable(struct super_block *sb, void __user *addr)
{
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
- if (!sb->s_qcop->set_xstate)
+ if (!sb->s_qcop->quota_disable)
return -ENOSYS;
- return sb->s_qcop->set_xstate(sb, flags, cmd);
+ return sb->s_qcop->quota_disable(sb, flags);
}
static int quota_getxstate(struct super_block *sb, void __user *addr)
@@ -247,10 +290,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
return ret;
}
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs relies on the definitions being in that header
+ * file. So just define the same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+ return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+ return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+ dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+ dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_space = quota_bbtob(src->d_bcount);
+ dst->d_ino_count = src->d_icount;
+ dst->d_ino_timer = src->d_itimer;
+ dst->d_spc_timer = src->d_btimer;
+ dst->d_ino_warns = src->d_iwarns;
+ dst->d_spc_warns = src->d_bwarns;
+ dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+ dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+ dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+ dst->d_rt_spc_timer = src->d_rtbtimer;
+ dst->d_rt_spc_warns = src->d_rtbwarns;
+ dst->d_fieldmask = 0;
+ if (src->d_fieldmask & FS_DQ_ISOFT)
+ dst->d_fieldmask |= QC_INO_SOFT;
+ if (src->d_fieldmask & FS_DQ_IHARD)
+ dst->d_fieldmask |= QC_INO_HARD;
+ if (src->d_fieldmask & FS_DQ_BSOFT)
+ dst->d_fieldmask |= QC_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_BHARD)
+ dst->d_fieldmask |= QC_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_RTBSOFT)
+ dst->d_fieldmask |= QC_RT_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_RTBHARD)
+ dst->d_fieldmask |= QC_RT_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_BTIMER)
+ dst->d_fieldmask |= QC_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_ITIMER)
+ dst->d_fieldmask |= QC_INO_TIMER;
+ if (src->d_fieldmask & FS_DQ_RTBTIMER)
+ dst->d_fieldmask |= QC_RT_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_BWARNS)
+ dst->d_fieldmask |= QC_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_IWARNS)
+ dst->d_fieldmask |= QC_INO_WARNS;
+ if (src->d_fieldmask & FS_DQ_RTBWARNS)
+ dst->d_fieldmask |= QC_RT_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_BCOUNT)
+ dst->d_fieldmask |= QC_SPACE;
+ if (src->d_fieldmask & FS_DQ_ICOUNT)
+ dst->d_fieldmask |= QC_INO_COUNT;
+ if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+ dst->d_fieldmask |= QC_RT_SPACE;
+}
+
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
struct kqid qid;
if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +371,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
qid = make_kqid(current_user_ns(), type, id);
if (!qid_valid(qid))
return -EINVAL;
- return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+ copy_from_xfs_dqblk(&qdq, &fdq);
+ return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+ int type, qid_t id)
+{
+ memset(dst, 0, sizeof(*dst));
+ dst->d_version = FS_DQUOT_VERSION;
+ dst->d_id = id;
+ if (type == USRQUOTA)
+ dst->d_flags = FS_USER_QUOTA;
+ else if (type == PRJQUOTA)
+ dst->d_flags = FS_PROJ_QUOTA;
+ else
+ dst->d_flags = FS_GROUP_QUOTA;
+ dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+ dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_bcount = quota_btobb(src->d_space);
+ dst->d_icount = src->d_ino_count;
+ dst->d_itimer = src->d_ino_timer;
+ dst->d_btimer = src->d_spc_timer;
+ dst->d_iwarns = src->d_ino_warns;
+ dst->d_bwarns = src->d_spc_warns;
+ dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+ dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+ dst->d_rtbcount = quota_btobb(src->d_rt_space);
+ dst->d_rtbtimer = src->d_rt_spc_timer;
+ dst->d_rtbwarns = src->d_rt_spc_warns;
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
struct kqid qid;
int ret;
@@ -275,8 +417,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
qid = make_kqid(current_user_ns(), type, id);
if (!qid_valid(qid))
return -EINVAL;
- ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
- if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+ ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+ if (ret)
+ return ret;
+ copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+ if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
}
@@ -317,9 +462,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
case Q_QUOTAON:
return quota_quotaon(sb, type, cmd, id, path);
case Q_QUOTAOFF:
- if (!sb->s_qcop->quota_off)
- return -ENOSYS;
- return sb->s_qcop->quota_off(sb, type);
+ return quota_quotaoff(sb, type);
case Q_GETFMT:
return quota_getfmt(sb, type, addr);
case Q_GETINFO:
@@ -335,8 +478,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
return -ENOSYS;
return sb->s_qcop->quota_sync(sb, type);
case Q_XQUOTAON:
+ return quota_enable(sb, addr);
case Q_XQUOTAOFF:
- return quota_setxstate(sb, cmd, addr);
+ return quota_disable(sb, addr);
case Q_XQUOTARM:
return quota_rmxquota(sb, addr);
case Q_XGETQSTAT:
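
quota_bbtob()/quota_btobb() above mirror XFS's BBTOB/BTOBB: they convert between bytes and 512-byte "basic blocks", rounding byte counts up. A self-contained userspace check of the rounding behaviour (helpers copied from the hunk, with userspace types):

    #include <assert.h>
    #include <stdint.h>

    #define XFS_BB_SHIFT 9                  /* 512-byte basic blocks */

    static inline uint64_t quota_bbtob(uint64_t blocks)
    {
            return blocks << XFS_BB_SHIFT;
    }

    static inline uint64_t quota_btobb(uint64_t bytes)
    {
            return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
    }

    int main(void)
    {
            assert(quota_btobb(1)   == 1);  /* rounds up */
            assert(quota_btobb(512) == 1);
            assert(quota_btobb(513) == 2);
            /* inverse only for block-aligned sizes: */
            assert(quota_bbtob(quota_btobb(513)) == 1024);
            return 0;
    }
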
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 469c684..8fe79be 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -169,8 +169,8 @@ static int v1_read_file_info(struct super_block *sb, int type)
}
ret = 0;
/* limits are stored as unsigned 32-bit data */
- dqopt->info[type].dqi_maxblimit = 0xffffffff;
- dqopt->info[type].dqi_maxilimit = 0xffffffff;
+ dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
dqopt->info[type].dqi_igrace =
dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
dqopt->info[type].dqi_bgrace =
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 02751ec..9cb10d7 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -117,16 +117,17 @@ static int v2_read_file_info(struct super_block *sb, int type)
qinfo = info->dqi_priv;
if (version == 0) {
/* limits are stored as unsigned 32-bit data */
- info->dqi_maxblimit = 0xffffffff;
- info->dqi_maxilimit = 0xffffffff;
+ info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ info->dqi_max_ino_limit = 0xffffffff;
} else {
- /* used space is stored as unsigned 64-bit value */
- info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */
- info->dqi_maxilimit = 0xffffffffffffffffULL;
+ /* limits are stored as unsigned 64-bit values in bytes */
+ info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
+ info->dqi_max_ino_limit = 0xffffffffffffffffULL;
}
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
- info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
+ /* No flags currently supported */
+ info->dqi_flags = 0;
qinfo->dqi_sb = sb;
qinfo->dqi_type = type;
qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
@@ -157,7 +158,8 @@ static int v2_write_file_info(struct super_block *sb, int type)
info->dqi_flags &= ~DQF_INFO_DIRTY;
dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
- dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+ /* No flags currently supported */
+ dinfo.dqi_flags = cpu_to_le32(0);
spin_unlock(&dq_data_lock);
dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
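
Note the unit change behind dqi_max_spc_limit: where dqi_maxblimit counted quota blocks, the new field holds bytes. For the 32-bit v1 and version-0 v2 formats the cap therefore becomes 0xffffffff << QUOTABLOCK_BITS; assuming the usual 1 KiB quota block (QUOTABLOCK_BITS == 10, matching the QIF_DQBLKSIZE_BITS used by qbtos()/stoqb() earlier), that is (2^32 - 1) * 2^10 bytes, i.e. just under 4 TiB, while version-1 v2 files keep the full 2^64 - 1 byte range.
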
diff --git a/fs/read_write.c b/fs/read_write.c
index c0805c9..4060691 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -358,7 +358,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
return retval;
}
- if (unlikely(inode->i_flock && mandatory_lock(inode))) {
+ if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
retval = locks_mandatory_area(
read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
inode, file, pos, count);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 538519e..035e510 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1536,7 +1536,6 @@ static const struct vm_operations_struct ubifs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = ubifs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index 0e0e99b..c6e17a7 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -2,10 +2,12 @@ config UDF_FS
tristate "UDF file system support"
select CRC_ITU_T
help
- This is the new file system used on some CD-ROMs and DVDs. Say Y if
- you intend to mount DVD discs or CDRW's written in packet mode, or
- if written to by other UDF utilities, such as DirectCD.
- Please read <file:Documentation/filesystems/udf.txt>.
+ This is a file system used on some CD-ROMs and DVDs. Since the
+ file system is supported by multiple operating systems and is more
+ compatible with standard Unix file systems, it is also suitable for
+ removable USB disks. Say Y if you intend to mount DVD discs or CD-RWs
+ written in packet mode, or if you want to use UDF for removable USB
+ disks. Please read <file:Documentation/filesystems/udf.txt>.
To compile this file system support as a module, choose M here: the
module will be called udf.
diff --git a/fs/udf/file.c b/fs/udf/file.c
index bb15771..08f3555 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -224,7 +224,7 @@ out:
static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE &&
- atomic_read(&inode->i_writecount) > 1) {
+ atomic_read(&inode->i_writecount) == 1) {
/*
* Grab i_mutex to avoid races with writes changing i_size
* while we are running.
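
The inverted test is correct because ->release() runs before the VFS drops the closing writer's hold on i_writecount. Roughly, in __fput() (fs/file_table.c):

    file->f_op->release(inode, file);   /* udf_release_file() runs here  */
    ...
    if (file->f_mode & FMODE_WRITER)
            put_write_access(inode);    /* i_writecount dropped only now */

So inside ->release() a count of exactly 1 identifies the last writer; the old "> 1" test instead fired while other writers were still active and skipped the final close.
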
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5bc71d9..a445d59 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -750,7 +750,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
/* Are we beyond EOF? */
if (etype == -1) {
int ret;
- isBeyondEOF = 1;
+ isBeyondEOF = true;
if (count) {
if (c)
laarr[0] = laarr[1];
@@ -792,7 +792,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
endnum = c + 1;
lastblock = 1;
} else {
- isBeyondEOF = 0;
+ isBeyondEOF = false;
endnum = startnum = ((count > 2) ? 2 : count);
/* if the current extent is in position 0,
@@ -1288,6 +1288,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
struct kernel_lb_addr *iloc = &iinfo->i_location;
unsigned int link_count;
unsigned int indirections = 0;
+ int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
reread:
@@ -1374,38 +1375,35 @@ reread:
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ ret = udf_alloc_i_data(inode, bs -
sizeof(struct extendedFileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
- inode->i_sb->s_blocksize -
- sizeof(struct extendedFileEntry));
+ bs - sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct fileEntry));
+ ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
- inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+ bs - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ ret = udf_alloc_i_data(inode, bs -
sizeof(struct unallocSpaceEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
- inode->i_sb->s_blocksize -
- sizeof(struct unallocSpaceEntry));
+ bs - sizeof(struct unallocSpaceEntry));
return 0;
}
@@ -1489,6 +1487,15 @@ reread:
}
inode->i_generation = iinfo->i_unique;
+ /*
+ * Sanity check length of allocation descriptors and extended attrs to
+ * avoid integer overflows
+ */
+ if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
+ goto out;
+ /* Now do exact checks */
+ if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
+ goto out;
/* Sanity checks for files in ICB so that we don't get confused later */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
/*
@@ -1498,8 +1505,7 @@ reread:
if (iinfo->i_lenAlloc != inode->i_size)
goto out;
/* File in ICB has to fit in there... */
- if (inode->i_size > inode->i_sb->s_blocksize -
- udf_file_entry_alloc_offset(inode))
+ if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
goto out;
}
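
The two-step check matters because i_lenEAttr and i_lenAlloc are 32-bit values read straight from disk, so forming sums before bounding them can wrap. A sketch of the failure mode the coarse pre-check closes (values illustrative):

    /* with bs == 2048 and a crafted i_lenAlloc == 0xFFFFFF00, the sum
     * udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc is evaluated
     * in 32-bit arithmetic, wraps to a small value, and would sail past
     * the "exact" comparison against bs; bounding each field by the block
     * size first keeps the later sums overflow-free */
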
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3ccb2f1..f169411 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1599,7 +1599,7 @@ static noinline int udf_process_sequence(
struct udf_vds_record *curr;
struct generic_desc *gd;
struct volDescPtr *vdp;
- int done = 0;
+ bool done = false;
uint32_t vdsn;
uint16_t ident;
long next_s = 0, next_e = 0;
@@ -1680,7 +1680,7 @@ static noinline int udf_process_sequence(
lastblock = next_e;
next_s = next_e = 0;
} else
- done = 1;
+ done = true;
break;
}
brelse(bh);
@@ -2300,6 +2300,7 @@ static void udf_put_super(struct super_block *sb)
udf_close_lvid(sb);
brelse(sbi->s_lvid_bh);
udf_sb_free_partitions(sb);
+ mutex_destroy(&sbi->s_alloc_mutex);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 53e95b2..a7a3a63 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -91,16 +91,6 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
return ptr;
}
-void
-kmem_free(const void *ptr)
-{
- if (!is_vmalloc_addr(ptr)) {
- kfree(ptr);
- } else {
- vfree(ptr);
- }
-}
-
void *
kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
xfs_km_flags_t flags)
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 64db0e5..cc6b768 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -63,7 +63,10 @@ kmem_flags_convert(xfs_km_flags_t flags)
extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
-extern void kmem_free(const void *);
+static inline void kmem_free(const void *ptr)
+{
+ kvfree(ptr);
+}
extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
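
Making kmem_free() a static inline around kvfree() is safe because kvfree() already performs the same address-type dispatch the removed out-of-line body did; its core in mm/util.c is essentially:

    void kvfree(const void *addr)
    {
            if (is_vmalloc_addr(addr))
                    vfree(addr);
            else
                    kfree(addr);
    }
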
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 5d38e8b..15105db 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -403,7 +403,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
xfs_sb_version_addattr2(&mp->m_sb);
spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+ xfs_log_sb(tp);
} else
spin_unlock(&mp->m_sb_lock);
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index b5eb474..61ec015 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -973,7 +973,11 @@ xfs_bmap_local_to_extents(
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
- /* initialise the block and copy the data */
+ /*
+ * Initialise the block and copy the data
+ *
+ * Note: init_fn must set the buffer log item type correctly!
+ */
init_fn(tp, bp, ip, ifp);
/* account for the change in fork size and log everything */
@@ -1221,22 +1225,20 @@ xfs_bmap_add_attrfork(
goto bmap_cancel;
if (!xfs_sb_version_hasattr(&mp->m_sb) ||
(!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
- __int64_t sbfields = 0;
+ bool log_sb = false;
spin_lock(&mp->m_sb_lock);
if (!xfs_sb_version_hasattr(&mp->m_sb)) {
xfs_sb_version_addattr(&mp->m_sb);
- sbfields |= XFS_SB_VERSIONNUM;
+ log_sb = true;
}
if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
xfs_sb_version_addattr2(&mp->m_sb);
- sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+ log_sb = true;
}
- if (sbfields) {
- spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, sbfields);
- } else
- spin_unlock(&mp->m_sb_lock);
+ spin_unlock(&mp->m_sb_lock);
+ if (log_sb)
+ xfs_log_sb(tp);
}
error = xfs_bmap_finish(&tp, &flist, &committed);
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 44db6db..b9d8a49 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -28,6 +28,37 @@ struct xfs_trans;
extern kmem_zone_t *xfs_bmap_free_item_zone;
/*
+ * Argument structure for xfs_bmap_alloc.
+ */
+struct xfs_bmalloca {
+ xfs_fsblock_t *firstblock; /* i/o first block allocated */
+ struct xfs_bmap_free *flist; /* bmap freelist */
+ struct xfs_trans *tp; /* transaction pointer */
+ struct xfs_inode *ip; /* incore inode pointer */
+ struct xfs_bmbt_irec prev; /* extent before the new one */
+ struct xfs_bmbt_irec got; /* extent after, or delayed */
+
+ xfs_fileoff_t offset; /* offset in file filling in */
+ xfs_extlen_t length; /* i/o length asked/allocated */
+ xfs_fsblock_t blkno; /* starting block of new extent */
+
+ struct xfs_btree_cur *cur; /* btree cursor */
+ xfs_extnum_t idx; /* current extent index */
+ int nallocs;/* number of extents alloc'd */
+ int logflags;/* flags for transaction logging */
+
+ xfs_extlen_t total; /* total blocks needed for xaction */
+ xfs_extlen_t minlen; /* minimum allocation size (blocks) */
+ xfs_extlen_t minleft; /* amount must be left after alloc */
+ bool eof; /* set if allocating past last extent */
+ bool wasdel; /* replacing a delayed allocation */
+ bool userdata;/* set if is user data */
+ bool aeof; /* allocated space at eof */
+ bool conv; /* overwriting unwritten extents */
+ int flags;
+};
+
+/*
* List of extents to be free "later".
* The list is kept sorted on xbf_startblock.
*/
@@ -149,6 +180,8 @@ void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
struct xfs_bmap_free *flist, struct xfs_mount *mp);
void xfs_bmap_cancel(struct xfs_bmap_free *flist);
+int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
+ int *committed);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index fbd6da2..8eb7189 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -151,10 +151,13 @@ typedef struct xfs_sb {
__uint32_t sb_features2; /* additional feature bits */
/*
- * bad features2 field as a result of failing to pad the sb
- * structure to 64 bits. Some machines will be using this field
- * for features2 bits. Easiest just to mark it bad and not use
- * it for anything else.
+ * bad features2 field as a result of failing to pad the sb structure to
+ * 64 bits. Some machines will be using this field for features2 bits.
+ * Easiest just to mark it bad and not use it for anything else.
+ *
+ * This is not kept up to date in memory; it is always overwritten by
+ * the value in sb_features2 when formatting the incore superblock to
+ * the disk buffer.
*/
__uint32_t sb_bad_features2;
@@ -304,8 +307,8 @@ typedef enum {
#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
-#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2)
-#define XFS_SB_BAD_FEATURES2 XFS_SB_MVAL(BAD_FEATURES2)
+#define XFS_SB_FEATURES2 (XFS_SB_MVAL(FEATURES2) | \
+ XFS_SB_MVAL(BAD_FEATURES2))
#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
@@ -319,9 +322,9 @@ typedef enum {
XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
- XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \
- XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \
- XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO)
+ XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \
+ XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \
+ XFS_SB_PQUOTINO)
/*
@@ -453,13 +456,11 @@ static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
{
sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
- sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
}
static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
{
sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
- sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
if (!sbp->sb_features2)
sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
}
@@ -475,7 +476,6 @@ static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
{
sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
- sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
}
/*
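
Folding XFS_SB_MVAL(BAD_FEATURES2) into XFS_SB_FEATURES2 (and dropping the standalone bit from XFS_SB_MOD_BITS) pairs with the xfs_sb_to_disk() rework below: sb_bad_features2 is no longer maintained as an independent in-core field but is overwritten from sb_features2 whenever the superblock is formatted to the disk buffer, as in:

    /* from xfs_sb_to_disk(), below */
    from->sb_bad_features2 = from->sb_features2;
    to->sb_features2       = cpu_to_be32(from->sb_features2);
    to->sb_bad_features2   = cpu_to_be32(from->sb_bad_features2);

so a single mask bit covering both on-disk fields is all that is needed.
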
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 18dc721..18dc721 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 752915f..b0a5fe9 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -40,69 +40,6 @@
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
*/
-static const struct {
- short offset;
- short type; /* 0 = integer
- * 1 = binary / string (no translation)
- */
-} xfs_sb_info[] = {
- { offsetof(xfs_sb_t, sb_magicnum), 0 },
- { offsetof(xfs_sb_t, sb_blocksize), 0 },
- { offsetof(xfs_sb_t, sb_dblocks), 0 },
- { offsetof(xfs_sb_t, sb_rblocks), 0 },
- { offsetof(xfs_sb_t, sb_rextents), 0 },
- { offsetof(xfs_sb_t, sb_uuid), 1 },
- { offsetof(xfs_sb_t, sb_logstart), 0 },
- { offsetof(xfs_sb_t, sb_rootino), 0 },
- { offsetof(xfs_sb_t, sb_rbmino), 0 },
- { offsetof(xfs_sb_t, sb_rsumino), 0 },
- { offsetof(xfs_sb_t, sb_rextsize), 0 },
- { offsetof(xfs_sb_t, sb_agblocks), 0 },
- { offsetof(xfs_sb_t, sb_agcount), 0 },
- { offsetof(xfs_sb_t, sb_rbmblocks), 0 },
- { offsetof(xfs_sb_t, sb_logblocks), 0 },
- { offsetof(xfs_sb_t, sb_versionnum), 0 },
- { offsetof(xfs_sb_t, sb_sectsize), 0 },
- { offsetof(xfs_sb_t, sb_inodesize), 0 },
- { offsetof(xfs_sb_t, sb_inopblock), 0 },
- { offsetof(xfs_sb_t, sb_fname[0]), 1 },
- { offsetof(xfs_sb_t, sb_blocklog), 0 },
- { offsetof(xfs_sb_t, sb_sectlog), 0 },
- { offsetof(xfs_sb_t, sb_inodelog), 0 },
- { offsetof(xfs_sb_t, sb_inopblog), 0 },
- { offsetof(xfs_sb_t, sb_agblklog), 0 },
- { offsetof(xfs_sb_t, sb_rextslog), 0 },
- { offsetof(xfs_sb_t, sb_inprogress), 0 },
- { offsetof(xfs_sb_t, sb_imax_pct), 0 },
- { offsetof(xfs_sb_t, sb_icount), 0 },
- { offsetof(xfs_sb_t, sb_ifree), 0 },
- { offsetof(xfs_sb_t, sb_fdblocks), 0 },
- { offsetof(xfs_sb_t, sb_frextents), 0 },
- { offsetof(xfs_sb_t, sb_uquotino), 0 },
- { offsetof(xfs_sb_t, sb_gquotino), 0 },
- { offsetof(xfs_sb_t, sb_qflags), 0 },
- { offsetof(xfs_sb_t, sb_flags), 0 },
- { offsetof(xfs_sb_t, sb_shared_vn), 0 },
- { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
- { offsetof(xfs_sb_t, sb_unit), 0 },
- { offsetof(xfs_sb_t, sb_width), 0 },
- { offsetof(xfs_sb_t, sb_dirblklog), 0 },
- { offsetof(xfs_sb_t, sb_logsectlog), 0 },
- { offsetof(xfs_sb_t, sb_logsectsize), 0 },
- { offsetof(xfs_sb_t, sb_logsunit), 0 },
- { offsetof(xfs_sb_t, sb_features2), 0 },
- { offsetof(xfs_sb_t, sb_bad_features2), 0 },
- { offsetof(xfs_sb_t, sb_features_compat), 0 },
- { offsetof(xfs_sb_t, sb_features_ro_compat), 0 },
- { offsetof(xfs_sb_t, sb_features_incompat), 0 },
- { offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
- { offsetof(xfs_sb_t, sb_crc), 0 },
- { offsetof(xfs_sb_t, sb_pad), 0 },
- { offsetof(xfs_sb_t, sb_pquotino), 0 },
- { offsetof(xfs_sb_t, sb_lsn), 0 },
- { sizeof(xfs_sb_t), 0 }
-};
-
/*
* Reference counting access wrappers to the perag structures.
* Because we never free per-ag structures, the only thing we
@@ -461,58 +398,49 @@ xfs_sb_from_disk(
__xfs_sb_from_disk(to, from, true);
}
-static inline void
+static void
xfs_sb_quota_to_disk(
- xfs_dsb_t *to,
- xfs_sb_t *from,
- __int64_t *fields)
+ struct xfs_dsb *to,
+ struct xfs_sb *from)
{
__uint16_t qflags = from->sb_qflags;
+ to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
+ if (xfs_sb_version_has_pquotino(from)) {
+ to->sb_qflags = cpu_to_be16(from->sb_qflags);
+ to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
+ to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
+ return;
+ }
+
/*
- * We need to do these manipilations only if we are working
- * with an older version of on-disk superblock.
+ * The in-core version of sb_qflags do not have XFS_OQUOTA_*
+ * flags, whereas the on-disk version does. So, convert incore
+ * XFS_{PG}QUOTA_* flags to on-disk XFS_OQUOTA_* flags.
*/
- if (xfs_sb_version_has_pquotino(from))
- return;
+ qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
+ XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
- if (*fields & XFS_SB_QFLAGS) {
- /*
- * The in-core version of sb_qflags do not have
- * XFS_OQUOTA_* flags, whereas the on-disk version
- * does. So, convert incore XFS_{PG}QUOTA_* flags
- * to on-disk XFS_OQUOTA_* flags.
- */
- qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
- XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
-
- if (from->sb_qflags &
- (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
- qflags |= XFS_OQUOTA_ENFD;
- if (from->sb_qflags &
- (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
- qflags |= XFS_OQUOTA_CHKD;
- to->sb_qflags = cpu_to_be16(qflags);
- *fields &= ~XFS_SB_QFLAGS;
- }
+ if (from->sb_qflags &
+ (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
+ qflags |= XFS_OQUOTA_ENFD;
+ if (from->sb_qflags &
+ (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
+ qflags |= XFS_OQUOTA_CHKD;
+ to->sb_qflags = cpu_to_be16(qflags);
/*
- * GQUOTINO and PQUOTINO cannot be used together in versions of
- * superblock that do not have pquotino. from->sb_flags tells us which
- * quota is active and should be copied to disk. If neither are active,
- * make sure we write NULLFSINO to the sb_gquotino field as a quota
- * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
- * bit is set.
+ * GQUOTINO and PQUOTINO cannot be used together in versions
+ * of the superblock that do not have pquotino. from->sb_qflags
+ * tells us which quota is active and should be copied to
+ * disk. If neither is active, we should NULL the inode.
*
- * Note that we don't need to handle the sb_uquotino or sb_pquotino here
- * as they do not require any translation. Hence the main sb field loop
- * will write them appropriately from the in-core superblock.
+ * In all cases, the separate pquotino must remain 0 because it
+ * is beyond the "end" of the valid non-pquotino superblock.
*/
- if ((*fields & XFS_SB_GQUOTINO) &&
- (from->sb_qflags & XFS_GQUOTA_ACCT))
+ if (from->sb_qflags & XFS_GQUOTA_ACCT)
to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
- else if ((*fields & XFS_SB_PQUOTINO) &&
- (from->sb_qflags & XFS_PQUOTA_ACCT))
+ else if (from->sb_qflags & XFS_PQUOTA_ACCT)
to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
else {
/*
@@ -526,63 +454,78 @@ xfs_sb_quota_to_disk(
to->sb_gquotino = cpu_to_be64(NULLFSINO);
}
- *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
+ to->sb_pquotino = 0;
}
-/*
- * Copy in core superblock to ondisk one.
- *
- * The fields argument is mask of superblock fields to copy.
- */
void
xfs_sb_to_disk(
- xfs_dsb_t *to,
- xfs_sb_t *from,
- __int64_t fields)
+ struct xfs_dsb *to,
+ struct xfs_sb *from)
{
- xfs_caddr_t to_ptr = (xfs_caddr_t)to;
- xfs_caddr_t from_ptr = (xfs_caddr_t)from;
- xfs_sb_field_t f;
- int first;
- int size;
-
- ASSERT(fields);
- if (!fields)
- return;
+ xfs_sb_quota_to_disk(to, from);
- /* We should never write the crc here, it's updated in the IO path */
- fields &= ~XFS_SB_CRC;
-
- xfs_sb_quota_to_disk(to, from, &fields);
- while (fields) {
- f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
- first = xfs_sb_info[f].offset;
- size = xfs_sb_info[f + 1].offset - first;
-
- ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
-
- if (size == 1 || xfs_sb_info[f].type == 1) {
- memcpy(to_ptr + first, from_ptr + first, size);
- } else {
- switch (size) {
- case 2:
- *(__be16 *)(to_ptr + first) =
- cpu_to_be16(*(__u16 *)(from_ptr + first));
- break;
- case 4:
- *(__be32 *)(to_ptr + first) =
- cpu_to_be32(*(__u32 *)(from_ptr + first));
- break;
- case 8:
- *(__be64 *)(to_ptr + first) =
- cpu_to_be64(*(__u64 *)(from_ptr + first));
- break;
- default:
- ASSERT(0);
- }
- }
+ to->sb_magicnum = cpu_to_be32(from->sb_magicnum);
+ to->sb_blocksize = cpu_to_be32(from->sb_blocksize);
+ to->sb_dblocks = cpu_to_be64(from->sb_dblocks);
+ to->sb_rblocks = cpu_to_be64(from->sb_rblocks);
+ to->sb_rextents = cpu_to_be64(from->sb_rextents);
+ memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
+ to->sb_logstart = cpu_to_be64(from->sb_logstart);
+ to->sb_rootino = cpu_to_be64(from->sb_rootino);
+ to->sb_rbmino = cpu_to_be64(from->sb_rbmino);
+ to->sb_rsumino = cpu_to_be64(from->sb_rsumino);
+ to->sb_rextsize = cpu_to_be32(from->sb_rextsize);
+ to->sb_agblocks = cpu_to_be32(from->sb_agblocks);
+ to->sb_agcount = cpu_to_be32(from->sb_agcount);
+ to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks);
+ to->sb_logblocks = cpu_to_be32(from->sb_logblocks);
+ to->sb_versionnum = cpu_to_be16(from->sb_versionnum);
+ to->sb_sectsize = cpu_to_be16(from->sb_sectsize);
+ to->sb_inodesize = cpu_to_be16(from->sb_inodesize);
+ to->sb_inopblock = cpu_to_be16(from->sb_inopblock);
+ memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
+ to->sb_blocklog = from->sb_blocklog;
+ to->sb_sectlog = from->sb_sectlog;
+ to->sb_inodelog = from->sb_inodelog;
+ to->sb_inopblog = from->sb_inopblog;
+ to->sb_agblklog = from->sb_agblklog;
+ to->sb_rextslog = from->sb_rextslog;
+ to->sb_inprogress = from->sb_inprogress;
+ to->sb_imax_pct = from->sb_imax_pct;
+ to->sb_icount = cpu_to_be64(from->sb_icount);
+ to->sb_ifree = cpu_to_be64(from->sb_ifree);
+ to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks);
+ to->sb_frextents = cpu_to_be64(from->sb_frextents);
- fields &= ~(1LL << f);
+ to->sb_flags = from->sb_flags;
+ to->sb_shared_vn = from->sb_shared_vn;
+ to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt);
+ to->sb_unit = cpu_to_be32(from->sb_unit);
+ to->sb_width = cpu_to_be32(from->sb_width);
+ to->sb_dirblklog = from->sb_dirblklog;
+ to->sb_logsectlog = from->sb_logsectlog;
+ to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize);
+ to->sb_logsunit = cpu_to_be32(from->sb_logsunit);
+
+ /*
+ * We need to ensure that bad_features2 always matches features2.
+ * Hence we enforce that here rather than having to remember to do it
+ * everywhere else that updates features2.
+ */
+ from->sb_bad_features2 = from->sb_features2;
+ to->sb_features2 = cpu_to_be32(from->sb_features2);
+ to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2);
+
+ if (xfs_sb_version_hascrc(from)) {
+ to->sb_features_compat = cpu_to_be32(from->sb_features_compat);
+ to->sb_features_ro_compat =
+ cpu_to_be32(from->sb_features_ro_compat);
+ to->sb_features_incompat =
+ cpu_to_be32(from->sb_features_incompat);
+ to->sb_features_log_incompat =
+ cpu_to_be32(from->sb_features_log_incompat);
+ to->sb_pad = 0;
+ to->sb_lsn = cpu_to_be64(from->sb_lsn);
}
}
@@ -816,42 +759,51 @@ xfs_initialize_perag_data(
}
/*
- * xfs_mod_sb() can be used to copy arbitrary changes to the
- * in-core superblock into the superblock buffer to be logged.
- * It does not provide the higher level of locking that is
- * needed to protect the in-core superblock from concurrent
- * access.
+ * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
+ * into the superblock buffer to be logged. It does not provide the higher
+ * level of locking that is needed to protect the in-core superblock from
+ * concurrent access.
*/
void
-xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
+xfs_log_sb(
+ struct xfs_trans *tp)
{
- xfs_buf_t *bp;
- int first;
- int last;
- xfs_mount_t *mp;
- xfs_sb_field_t f;
-
- ASSERT(fields);
- if (!fields)
- return;
- mp = tp->t_mountp;
- bp = xfs_trans_getsb(tp, mp, 0);
- first = sizeof(xfs_sb_t);
- last = 0;
-
- /* translate/copy */
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0);
- xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
+ xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+ xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
+}
- /* find modified range */
- f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
- ASSERT((1LL << f) & XFS_SB_MOD_BITS);
- last = xfs_sb_info[f + 1].offset - 1;
+/*
+ * xfs_sync_sb
+ *
+ * Sync the superblock to disk.
+ *
+ * Note that the caller is responsible for checking the frozen state of the
+ * filesystem. This procedure uses the non-blocking transaction allocator and
+ * thus will allow modifications to a frozen fs. This is required because this
+ * code can be called during the process of freezing where use of the high-level
+ * allocator would deadlock.
+ */
+int
+xfs_sync_sb(
+ struct xfs_mount *mp,
+ bool wait)
+{
+ struct xfs_trans *tp;
+ int error;
- f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
- ASSERT((1LL << f) & XFS_SB_MOD_BITS);
- first = xfs_sb_info[f].offset;
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
- xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
- xfs_trans_log_buf(tp, bp, first, last);
+ xfs_log_sb(tp);
+ if (wait)
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp, 0);
}
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 8eb1c54..b25bb9a 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -27,11 +27,12 @@ extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
extern void xfs_perag_put(struct xfs_perag *pag);
extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
-extern void xfs_sb_calc_crc(struct xfs_buf *);
-extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
-extern void xfs_sb_mount_common(struct xfs_mount *, struct xfs_sb *);
-extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
-extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
+extern void xfs_sb_calc_crc(struct xfs_buf *bp);
+extern void xfs_log_sb(struct xfs_trans *tp);
+extern int xfs_sync_sb(struct xfs_mount *mp, bool wait);
+extern void xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
+extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
+extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp);
#endif /* __XFS_SB_H__ */
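
Callers convert mechanically: wherever a field mask used to be assembled for xfs_mod_sb(), the whole superblock is now logged, and open-coded "modify and write the sb" sequences collapse into xfs_sync_sb(). A hedged before/after sketch (the caller is illustrative, not taken from this series):

    /* before */
    spin_lock(&mp->m_sb_lock);
    mp->m_sb.sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
    spin_unlock(&mp->m_sb_lock);
    xfs_mod_sb(tp, XFS_SB_VERSIONNUM);

    /* after: log the entire struct xfs_dsb */
    spin_lock(&mp->m_sb_lock);
    mp->m_sb.sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
    spin_unlock(&mp->m_sb_lock);
    xfs_log_sb(tp);

    /* and with no transaction already in hand: */
    error = xfs_sync_sb(mp, false);     /* allocate, log and commit, async */
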
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 82404da..8dda4b3 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -82,7 +82,7 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
#define XFS_TRANS_ATTR_RM 23
#define XFS_TRANS_ATTR_FLAG 24
#define XFS_TRANS_CLEAR_AGI_BUCKET 25
-#define XFS_TRANS_QM_SBCHANGE 26
+#define XFS_TRANS_SB_CHANGE 26
/*
* Dummy entries since we use the transaction type to index into the
* trans_type[] in xlog_recover_print_trans_head()
@@ -95,17 +95,15 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
#define XFS_TRANS_QM_DQCLUSTER 32
#define XFS_TRANS_QM_QINOCREATE 33
#define XFS_TRANS_QM_QUOTAOFF_END 34
-#define XFS_TRANS_SB_UNIT 35
-#define XFS_TRANS_FSYNC_TS 36
-#define XFS_TRANS_GROWFSRT_ALLOC 37
-#define XFS_TRANS_GROWFSRT_ZERO 38
-#define XFS_TRANS_GROWFSRT_FREE 39
-#define XFS_TRANS_SWAPEXT 40
-#define XFS_TRANS_SB_COUNT 41
-#define XFS_TRANS_CHECKPOINT 42
-#define XFS_TRANS_ICREATE 43
-#define XFS_TRANS_CREATE_TMPFILE 44
-#define XFS_TRANS_TYPE_MAX 44
+#define XFS_TRANS_FSYNC_TS 35
+#define XFS_TRANS_GROWFSRT_ALLOC 36
+#define XFS_TRANS_GROWFSRT_ZERO 37
+#define XFS_TRANS_GROWFSRT_FREE 38
+#define XFS_TRANS_SWAPEXT 39
+#define XFS_TRANS_CHECKPOINT 40
+#define XFS_TRANS_ICREATE 41
+#define XFS_TRANS_CREATE_TMPFILE 42
+#define XFS_TRANS_TYPE_MAX 43
/* new transaction types need to be reflected in xfs_logprint(8) */
#define XFS_TRANS_TYPES \
@@ -113,7 +111,6 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
{ XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
{ XFS_TRANS_INACTIVE, "INACTIVE" }, \
{ XFS_TRANS_CREATE, "CREATE" }, \
- { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \
{ XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
{ XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
{ XFS_TRANS_REMOVE, "REMOVE" }, \
@@ -134,23 +131,23 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
{ XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
{ XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
{ XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
- { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
+ { XFS_TRANS_SB_CHANGE, "SBCHANGE" }, \
+ { XFS_TRANS_DUMMY1, "DUMMY1" }, \
+ { XFS_TRANS_DUMMY2, "DUMMY2" }, \
{ XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
{ XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
{ XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
{ XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
{ XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
{ XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
- { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
{ XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
{ XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
{ XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
{ XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
{ XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
- { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
{ XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \
- { XFS_TRANS_DUMMY1, "DUMMY1" }, \
- { XFS_TRANS_DUMMY2, "DUMMY2" }, \
+ { XFS_TRANS_ICREATE, "ICREATE" }, \
+ { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \
{ XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
/*
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index c80c523..e7e26bd 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -178,6 +178,8 @@ xfs_symlink_local_to_remote(
struct xfs_mount *mp = ip->i_mount;
char *buf;
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
+
if (!xfs_sb_version_hascrc(&mp->m_sb)) {
bp->b_ops = NULL;
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 6c1330f..68cb1e7 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -716,17 +716,6 @@ xfs_calc_clear_agi_bucket_reservation(
}
/*
- * Clearing the quotaflags in the superblock.
- * the super block for changing quota flags: sector size
- */
-STATIC uint
-xfs_calc_qm_sbchange_reservation(
- struct xfs_mount *mp)
-{
- return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
-}
-
-/*
* Adjusting quota limits.
* the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
*/
@@ -864,9 +853,6 @@ xfs_trans_resv_calc(
* The following transactions are logged in logical format with
* a default log count.
*/
- resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp);
- resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT;
-
resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
diff --git a/fs/xfs/libxfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h
index 1097d14..2d5bdfc 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.h
+++ b/fs/xfs/libxfs/xfs_trans_resv.h
@@ -56,7 +56,6 @@ struct xfs_trans_resv {
struct xfs_trans_res tr_growrtalloc; /* grow realtime allocations */
struct xfs_trans_res tr_growrtzero; /* grow realtime zeroing */
struct xfs_trans_res tr_growrtfree; /* grow realtime freeing */
- struct xfs_trans_res tr_qm_sbchange; /* change quota flags */
struct xfs_trans_res tr_qm_setqlim; /* adjust quota limits */
struct xfs_trans_res tr_qm_dqalloc; /* allocate quota on disk */
struct xfs_trans_res tr_qm_quotaoff; /* turn quota off */
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index b79dc66..b79dc66 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 18e2f3b..3a9b7a1 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -135,30 +135,22 @@ xfs_setfilesize_trans_alloc(
*/
STATIC int
xfs_setfilesize(
- struct xfs_ioend *ioend)
+ struct xfs_inode *ip,
+ struct xfs_trans *tp,
+ xfs_off_t offset,
+ size_t size)
{
- struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_trans *tp = ioend->io_append_trans;
xfs_fsize_t isize;
- /*
- * The transaction may have been allocated in the I/O submission thread,
- * thus we need to mark ourselves as beeing in a transaction manually.
- * Similarly for freeze protection.
- */
- current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
- rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
- 0, 1, _THIS_IP_);
-
xfs_ilock(ip, XFS_ILOCK_EXCL);
- isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
+ isize = xfs_new_eof(ip, offset + size);
if (!isize) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_trans_cancel(tp, 0);
return 0;
}
- trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
+ trace_xfs_setfilesize(ip, offset, size);
ip->i_d.di_size = isize;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -167,6 +159,25 @@ xfs_setfilesize(
return xfs_trans_commit(tp, 0);
}
+STATIC int
+xfs_setfilesize_ioend(
+ struct xfs_ioend *ioend)
+{
+ struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ struct xfs_trans *tp = ioend->io_append_trans;
+
+ /*
+ * The transaction may have been allocated in the I/O submission thread,
+ * thus we need to mark ourselves as being in a transaction manually.
+ * Similarly for freeze protection.
+ */
+ current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
+ rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
+
+ return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
+}
+
/*
* Schedule IO completion handling on the final put of an ioend.
*
@@ -182,8 +193,7 @@ xfs_finish_ioend(
if (ioend->io_type == XFS_IO_UNWRITTEN)
queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
- else if (ioend->io_append_trans ||
- (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
+ else if (ioend->io_append_trans)
queue_work(mp->m_data_workqueue, &ioend->io_work);
else
xfs_destroy_ioend(ioend);
@@ -215,22 +225,8 @@ xfs_end_io(
if (ioend->io_type == XFS_IO_UNWRITTEN) {
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
ioend->io_size);
- } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
- /*
- * For direct I/O we do not know if we need to allocate blocks
- * or not so we can't preallocate an append transaction as that
- * results in nested reservations and log space deadlocks. Hence
- * allocate the transaction here. While this is sub-optimal and
- * can block IO completion for some time, we're stuck with doing
- * it this way until we can pass the ioend to the direct IO
- * allocation callbacks and avoid nesting that way.
- */
- error = xfs_setfilesize_trans_alloc(ioend);
- if (error)
- goto done;
- error = xfs_setfilesize(ioend);
} else if (ioend->io_append_trans) {
- error = xfs_setfilesize(ioend);
+ error = xfs_setfilesize_ioend(ioend);
} else {
ASSERT(!xfs_ioend_is_append(ioend));
}
@@ -242,17 +238,6 @@ done:
}
/*
- * Call IO completion handling in caller context on the final put of an ioend.
- */
-STATIC void
-xfs_finish_ioend_sync(
- struct xfs_ioend *ioend)
-{
- if (atomic_dec_and_test(&ioend->io_remaining))
- xfs_end_io(&ioend->io_work);
-}
-
-/*
* Allocate and initialise an IO completion structure.
* We need to track unwritten extent write completion here initially.
* We'll need to extend this for updating the ondisk inode size later
@@ -273,7 +258,6 @@ xfs_alloc_ioend(
* all the I/O from calling the completion routine too early.
*/
atomic_set(&ioend->io_remaining, 1);
- ioend->io_isdirect = 0;
ioend->io_error = 0;
ioend->io_list = NULL;
ioend->io_type = type;
@@ -1459,11 +1443,7 @@ xfs_get_blocks_direct(
*
* If the private argument is non-NULL __xfs_get_blocks signals us that we
* need to issue a transaction to convert the range from unwritten to written
- * extents. In case this is regular synchronous I/O we just call xfs_end_io
- * to do this and we are done. But in case this was a successful AIO
- * request this handler is called from interrupt context, from which we
- * can't start transactions. In that case offload the I/O completion to
- * the workqueues we also use for buffered I/O completion.
+ * extents.
*/
STATIC void
xfs_end_io_direct_write(
@@ -1472,7 +1452,12 @@ xfs_end_io_direct_write(
ssize_t size,
void *private)
{
- struct xfs_ioend *ioend = iocb->private;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return;
/*
* While the generic direct I/O code updates the inode size, it does
@@ -1480,22 +1465,33 @@ xfs_end_io_direct_write(
* end_io handler thinks the on-disk size is outside the in-core
* size. To prevent this just update it a little bit earlier here.
*/
- if (offset + size > i_size_read(ioend->io_inode))
- i_size_write(ioend->io_inode, offset + size);
+ if (offset + size > i_size_read(inode))
+ i_size_write(inode, offset + size);
/*
- * blockdev_direct_IO can return an error even after the I/O
- * completion handler was called. Thus we need to protect
- * against double-freeing.
+ * For direct I/O we do not know if we need to allocate blocks or not,
+ * so we can't preallocate an append transaction, as that results in
+ * nested reservations and log space deadlocks. Hence allocate the
+ * transaction here. While this is sub-optimal and can block IO
+ * completion for some time, we're stuck with doing it this way until
+ * we can pass the ioend to the direct IO allocation callbacks and
+ * avoid nesting that way.
*/
- iocb->private = NULL;
-
- ioend->io_offset = offset;
- ioend->io_size = size;
- if (private && size > 0)
- ioend->io_type = XFS_IO_UNWRITTEN;
+ if (private && size > 0) {
+ xfs_iomap_write_unwritten(ip, offset, size);
+ } else if (offset + size > ip->i_d.di_size) {
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return;
+ }
- xfs_finish_ioend_sync(ioend);
+ xfs_setfilesize(ip, tp, offset, size);
+ }
}
STATIC ssize_t
@@ -1507,39 +1503,16 @@ xfs_vm_direct_IO(
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
- struct xfs_ioend *ioend = NULL;
- ssize_t ret;
if (rw & WRITE) {
- size_t size = iov_iter_count(iter);
-
- /*
- * We cannot preallocate a size update transaction here as we
- * don't know whether allocation is necessary or not. Hence we
- * can only tell IO completion that one is necessary if we are
- * not doing unwritten extent conversion.
- */
- iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
- if (offset + size > XFS_I(inode)->i_d.di_size)
- ioend->io_isdirect = 1;
-
- ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+ return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
offset, xfs_get_blocks_direct,
xfs_end_io_direct_write, NULL,
DIO_ASYNC_EXTEND);
- if (ret != -EIOCBQUEUED && iocb->private)
- goto out_destroy_ioend;
- } else {
- ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
- offset, xfs_get_blocks_direct,
- NULL, NULL, 0);
}
-
- return ret;
-
-out_destroy_ioend:
- xfs_destroy_ioend(ioend);
- return ret;
+ return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+ offset, xfs_get_blocks_direct,
+ NULL, NULL, 0);
}
/*
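
With the ioend removed from the direct I/O write path, xfs_end_io_direct_write() is now invoked directly as the completion callback of __blockdev_direct_IO(). Its signature is dictated by dio_iodone_t; a sketch of that type as it stands in this era's <linux/fs.h> (an assumption worth verifying against the target tree):

	/*
	 * Generic direct I/O completion callback.  dio_complete() invokes
	 * it once per request with the final offset and byte count, which
	 * is why no per-request ioend allocation is needed any more.
	 */
	typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
				    ssize_t bytes, void *private);
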
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index f94dd45..ac644e0 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -24,14 +24,12 @@ extern mempool_t *xfs_ioend_pool;
* Types of I/O for bmap clustering and I/O completion tracking.
*/
enum {
- XFS_IO_DIRECT = 0, /* special case for direct I/O ioends */
XFS_IO_DELALLOC, /* covers delalloc region */
XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
XFS_IO_OVERWRITE, /* covers already allocated extent */
};
#define XFS_IO_TYPES \
- { 0, "" }, \
{ XFS_IO_DELALLOC, "delalloc" }, \
{ XFS_IO_UNWRITTEN, "unwritten" }, \
{ XFS_IO_OVERWRITE, "overwrite" }
@@ -45,7 +43,6 @@ typedef struct xfs_ioend {
unsigned int io_type; /* delalloc / unwritten */
int io_error; /* I/O error code */
atomic_t io_remaining; /* hold count */
- unsigned int io_isdirect : 1;/* direct I/O */
struct inode *io_inode; /* file being written to */
struct buffer_head *io_buffer_head;/* buffer linked list head */
struct buffer_head *io_buffer_tail;/* buffer linked list tail */
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 2fdb72d..736429a 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -26,43 +26,8 @@ struct xfs_ifork;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
+struct xfs_bmalloca;
-/*
- * Argument structure for xfs_bmap_alloc.
- */
-struct xfs_bmalloca {
- xfs_fsblock_t *firstblock; /* i/o first block allocated */
- struct xfs_bmap_free *flist; /* bmap freelist */
- struct xfs_trans *tp; /* transaction pointer */
- struct xfs_inode *ip; /* incore inode pointer */
- struct xfs_bmbt_irec prev; /* extent before the new one */
- struct xfs_bmbt_irec got; /* extent after, or delayed */
-
- xfs_fileoff_t offset; /* offset in file filling in */
- xfs_extlen_t length; /* i/o length asked/allocated */
- xfs_fsblock_t blkno; /* starting block of new extent */
-
- struct xfs_btree_cur *cur; /* btree cursor */
- xfs_extnum_t idx; /* current extent index */
- int nallocs;/* number of extents alloc'd */
- int logflags;/* flags for transaction logging */
-
- xfs_extlen_t total; /* total blocks needed for xaction */
- xfs_extlen_t minlen; /* minimum allocation size (blocks) */
- xfs_extlen_t minleft; /* amount must be left after alloc */
- bool eof; /* set if allocating past last extent */
- bool wasdel; /* replacing a delayed allocation */
- bool userdata;/* set if is user data */
- bool aeof; /* allocated space at eof */
- bool conv; /* overwriting unwritten extents */
- int flags;
- struct completion *done;
- struct work_struct work;
- int result;
-};
-
-int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
- int *committed);
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 3f9bd58..507d96a 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -319,6 +319,10 @@ xfs_buf_item_format(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
+ ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
+ (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
+ && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
+
/*
* If it is an inode buffer, transfer the in-memory state to the
@@ -535,7 +539,7 @@ xfs_buf_item_push(
if ((bp->b_flags & XBF_WRITE_FAIL) &&
___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
xfs_warn(bp->b_target->bt_mount,
-"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+"Detected failing async write on buffer block 0x%llx. Retrying async write.",
(long long)bp->b_bn);
}
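
The new ASSERT in xfs_buf_item_format() demands that every non-stale buffer logged in a transaction carries a valid buffer log format type, so that log recovery can verify and replay the buffer correctly. The xfs_inode.c hunks below show the pattern at the AGI call sites; a minimal sketch of what the assert expects from callers:

	/* stamp the buffer type before logging the range */
	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
	xfs_trans_log_buf(tp, agibp, offset,
			  offset + sizeof(xfs_agino_t) - 1);
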
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index c24c67e..2f536f3 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -86,7 +86,7 @@ static inline void xfs_dqflock(xfs_dquot_t *dqp)
wait_for_completion(&dqp->q_flush);
}
-static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
+static inline bool xfs_dqflock_nowait(xfs_dquot_t *dqp)
{
return try_wait_for_completion(&dqp->q_flush);
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 5684ac3..1cdba95 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -127,6 +127,42 @@ xfs_iozero(
return (-status);
}
+int
+xfs_update_prealloc_flags(
+ struct xfs_inode *ip,
+ enum xfs_prealloc_flags flags)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+ error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ if (!(flags & XFS_PREALLOC_INVISIBLE)) {
+ ip->i_d.di_mode &= ~S_ISUID;
+ if (ip->i_d.di_mode & S_IXGRP)
+ ip->i_d.di_mode &= ~S_ISGID;
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ }
+
+ if (flags & XFS_PREALLOC_SET)
+ ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+ if (flags & XFS_PREALLOC_CLEAR)
+ ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ if (flags & XFS_PREALLOC_SYNC)
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp, 0);
+}
+
/*
* Fsync operations on directories are much simpler than on regular files,
* as there is no file data to flush, and thus also no need for explicit
@@ -784,8 +820,8 @@ xfs_file_fallocate(
{
struct inode *inode = file_inode(file);
struct xfs_inode *ip = XFS_I(inode);
- struct xfs_trans *tp;
long error;
+ enum xfs_prealloc_flags flags = 0;
loff_t new_size = 0;
if (!S_ISREG(inode->i_mode))
@@ -822,6 +858,8 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
} else {
+ flags |= XFS_PREALLOC_SET;
+
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
@@ -839,28 +877,10 @@ xfs_file_fallocate(
goto out_unlock;
}
- tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
- error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto out_unlock;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- ip->i_d.di_mode &= ~S_ISUID;
- if (ip->i_d.di_mode & S_IXGRP)
- ip->i_d.di_mode &= ~S_ISGID;
-
- if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)))
- ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
if (file->f_flags & O_DSYNC)
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
+ flags |= XFS_PREALLOC_SYNC;
+
+ error = xfs_update_prealloc_flags(ip, flags);
if (error)
goto out_unlock;
@@ -1384,5 +1404,4 @@ static const struct vm_operations_struct xfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = xfs_vm_page_mkwrite,
- .remap_pages = generic_file_remap_pages,
};
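
xfs_update_prealloc_flags() now centralises the WRITEID transaction that xfs_file_fallocate() open-coded above, and that xfs_ioc_space() still open-codes until the xfs_ioctl.c hunk below. A usage sketch mirroring the fallocate path:

	/*
	 * Mark the range preallocated; for O_DSYNC files also force the
	 * transaction to disk before returning (sketch of a caller).
	 */
	enum xfs_prealloc_flags	flags = XFS_PREALLOC_SET;

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	error = xfs_update_prealloc_flags(ip, flags);
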
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index fdc6422..fba6532 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -488,6 +488,7 @@ xfs_growfs_data_private(
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
if (dpct)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
+ xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
if (error)
return error;
@@ -541,7 +542,7 @@ xfs_growfs_data_private(
saved_error = error;
continue;
}
- xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
+ xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
@@ -756,37 +757,6 @@ out:
return 0;
}
-/*
- * Dump a transaction into the log that contains no real change. This is needed
- * to be able to make the log dirty or stamp the current tail LSN into the log
- * during the covering operation.
- *
- * We cannot use an inode here for this - that will push dirty state back up
- * into the VFS and then periodic inode flushing will prevent log covering from
- * making progress. Hence we log a field in the superblock instead and use a
- * synchronous transaction to ensure the superblock is immediately unpinned
- * and can be written back.
- */
-int
-xfs_fs_log_dummy(
- xfs_mount_t *mp)
-{
- xfs_trans_t *tp;
- int error;
-
- tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- /* log the UUID because it is an unchanging field */
- xfs_mod_sb(tp, XFS_SB_UUID);
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp, 0);
-}
-
int
xfs_fs_goingdown(
xfs_mount_t *mp,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 41f804e..daafa1f 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1995,6 +1995,7 @@ xfs_iunlink(
agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
return 0;
@@ -2086,6 +2087,7 @@ xfs_iunlink_remove(
agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
} else {
@@ -2656,6 +2658,124 @@ xfs_sort_for_rename(
}
/*
+ * xfs_cross_rename()
+ *
+ * responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall
+ */
+STATIC int
+xfs_cross_rename(
+ struct xfs_trans *tp,
+ struct xfs_inode *dp1,
+ struct xfs_name *name1,
+ struct xfs_inode *ip1,
+ struct xfs_inode *dp2,
+ struct xfs_name *name2,
+ struct xfs_inode *ip2,
+ struct xfs_bmap_free *free_list,
+ xfs_fsblock_t *first_block,
+ int spaceres)
+{
+ int error = 0;
+ int ip1_flags = 0;
+ int ip2_flags = 0;
+ int dp2_flags = 0;
+
+ /* Swap inode number for dirent in first parent */
+ error = xfs_dir_replace(tp, dp1, name1,
+ ip2->i_ino,
+ first_block, free_list, spaceres);
+ if (error)
+ goto out;
+
+ /* Swap inode number for dirent in second parent */
+ error = xfs_dir_replace(tp, dp2, name2,
+ ip1->i_ino,
+ first_block, free_list, spaceres);
+ if (error)
+ goto out;
+
+ /*
+ * If we're renaming one or more directories across different parents,
+ * update the respective ".." entries (and link counts) to match the new
+ * parents.
+ */
+ if (dp1 != dp2) {
+ dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+
+ if (S_ISDIR(ip2->i_d.di_mode)) {
+ error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
+ dp1->i_ino, first_block,
+ free_list, spaceres);
+ if (error)
+ goto out;
+
+ /* transfer ip2 ".." reference to dp1 */
+ if (!S_ISDIR(ip1->i_d.di_mode)) {
+ error = xfs_droplink(tp, dp2);
+ if (error)
+ goto out;
+ error = xfs_bumplink(tp, dp1);
+ if (error)
+ goto out;
+ }
+
+ /*
+ * Although ip1 isn't changed here, userspace needs
+ * to be told about the change so that applications
+ * relying on it (like backup tools) are properly
+ * notified of it.
+ */
+ ip1_flags |= XFS_ICHGTIME_CHG;
+ ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ }
+
+ if (S_ISDIR(ip1->i_d.di_mode)) {
+ error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
+ dp2->i_ino, first_block,
+ free_list, spaceres);
+ if (error)
+ goto out;
+
+ /* transfer ip1 ".." reference to dp2 */
+ if (!S_ISDIR(ip2->i_d.di_mode)) {
+ error = xfs_droplink(tp, dp1);
+ if (error)
+ goto out;
+ error = xfs_bumplink(tp, dp2);
+ if (error)
+ goto out;
+ }
+
+ /*
+ * Although ip2 isn't changed here, userspace needs
+ * to be told about the change so that applications
+ * relying on it (like backup tools) are properly
+ * notified of it.
+ */
+ ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ ip2_flags |= XFS_ICHGTIME_CHG;
+ }
+ }
+
+ if (ip1_flags) {
+ xfs_trans_ichgtime(tp, ip1, ip1_flags);
+ xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
+ }
+ if (ip2_flags) {
+ xfs_trans_ichgtime(tp, ip2, ip2_flags);
+ xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
+ }
+ if (dp2_flags) {
+ xfs_trans_ichgtime(tp, dp2, dp2_flags);
+ xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
+ }
+ xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
+out:
+ return error;
+}
+
+/*
* xfs_rename
*/
int
@@ -2665,7 +2785,8 @@ xfs_rename(
xfs_inode_t *src_ip,
xfs_inode_t *target_dp,
struct xfs_name *target_name,
- xfs_inode_t *target_ip)
+ xfs_inode_t *target_ip,
+ unsigned int flags)
{
xfs_trans_t *tp = NULL;
xfs_mount_t *mp = src_dp->i_mount;
@@ -2743,6 +2864,18 @@ xfs_rename(
}
/*
+ * Handle RENAME_EXCHANGE flags
+ */
+ if (flags & RENAME_EXCHANGE) {
+ error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
+ target_dp, target_name, target_ip,
+ &free_list, &first_block, spaceres);
+ if (error)
+ goto abort_return;
+ goto finish_rename;
+ }
+
+ /*
* Set up the target.
*/
if (target_ip == NULL) {
@@ -2881,6 +3014,7 @@ xfs_rename(
if (new_parent)
xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
+finish_rename:
/*
* If this is a synchronous mount, make sure that the
* rename transaction goes to disk before returning to
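
With RENAME_EXCHANGE wired through xfs_cross_rename(), an atomic two-way name swap becomes reachable from userspace via renameat2(). A hypothetical test program; glibc of this era ships no wrapper, so it goes through syscall(2), and SYS_renameat2/RENAME_EXCHANGE assume sufficiently new kernel headers:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <fcntl.h>		/* AT_FDCWD */
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/fs.h>		/* RENAME_EXCHANGE */

	int main(void)
	{
		/* atomically exchange "a" and "b"; both must exist */
		if (syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "b",
			    RENAME_EXCHANGE) == -1) {
			perror("renameat2");
			return 1;
		}
		return 0;
	}
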
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 4ed2ba9..86cd6b3 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -338,7 +338,7 @@ int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
struct xfs_inode *src_ip, struct xfs_inode *target_dp,
struct xfs_name *target_name,
- struct xfs_inode *target_ip);
+ struct xfs_inode *target_ip, unsigned int flags);
void xfs_ilock(xfs_inode_t *, uint);
int xfs_ilock_nowait(xfs_inode_t *, uint);
@@ -377,6 +377,15 @@ int xfs_droplink(struct xfs_trans *, struct xfs_inode *);
int xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
/* from xfs_file.c */
+enum xfs_prealloc_flags {
+ XFS_PREALLOC_SET = (1 << 1),
+ XFS_PREALLOC_CLEAR = (1 << 2),
+ XFS_PREALLOC_SYNC = (1 << 3),
+ XFS_PREALLOC_INVISIBLE = (1 << 4),
+};
+
+int xfs_update_prealloc_flags(struct xfs_inode *,
+ enum xfs_prealloc_flags);
int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
int xfs_iozero(struct xfs_inode *, loff_t, size_t);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a1831980..f7afb86 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -606,11 +606,8 @@ xfs_ioc_space(
unsigned int cmd,
xfs_flock64_t *bf)
{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
struct iattr iattr;
- bool setprealloc = false;
- bool clrprealloc = false;
+ enum xfs_prealloc_flags flags = 0;
int error;
/*
@@ -630,6 +627,11 @@ xfs_ioc_space(
if (!S_ISREG(inode->i_mode))
return -EINVAL;
+ if (filp->f_flags & O_DSYNC)
+ flags |= XFS_PREALLOC_SYNC;
+ if (ioflags & XFS_IO_INVIS)
+ flags |= XFS_PREALLOC_INVISIBLE;
+
error = mnt_want_write_file(filp);
if (error)
return error;
@@ -673,25 +675,23 @@ xfs_ioc_space(
}
if (bf->l_start < 0 ||
- bf->l_start > mp->m_super->s_maxbytes ||
+ bf->l_start > inode->i_sb->s_maxbytes ||
bf->l_start + bf->l_len < 0 ||
- bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+ bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
error = -EINVAL;
goto out_unlock;
}
switch (cmd) {
case XFS_IOC_ZERO_RANGE:
+ flags |= XFS_PREALLOC_SET;
error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
- if (!error)
- setprealloc = true;
break;
case XFS_IOC_RESVSP:
case XFS_IOC_RESVSP64:
+ flags |= XFS_PREALLOC_SET;
error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
XFS_BMAPI_PREALLOC);
- if (!error)
- setprealloc = true;
break;
case XFS_IOC_UNRESVSP:
case XFS_IOC_UNRESVSP64:
@@ -701,6 +701,7 @@ xfs_ioc_space(
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP:
case XFS_IOC_FREESP64:
+ flags |= XFS_PREALLOC_CLEAR;
if (bf->l_start > XFS_ISIZE(ip)) {
error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
bf->l_start - XFS_ISIZE(ip), 0);
@@ -712,8 +713,6 @@ xfs_ioc_space(
iattr.ia_size = bf->l_start;
error = xfs_setattr_size(ip, &iattr);
- if (!error)
- clrprealloc = true;
break;
default:
ASSERT(0);
@@ -723,32 +722,7 @@ xfs_ioc_space(
if (error)
goto out_unlock;
- tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto out_unlock;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- if (!(ioflags & XFS_IO_INVIS)) {
- ip->i_d.di_mode &= ~S_ISUID;
- if (ip->i_d.di_mode & S_IXGRP)
- ip->i_d.di_mode &= ~S_ISGID;
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- }
-
- if (setprealloc)
- ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
- else if (clrprealloc)
- ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (filp->f_flags & O_DSYNC)
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
+ error = xfs_update_prealloc_flags(ip, flags);
out_unlock:
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
@@ -1013,20 +987,182 @@ xfs_diflags_to_linux(
inode->i_flags &= ~S_NOATIME;
}
-#define FSX_PROJID 1
-#define FSX_EXTSIZE 2
-#define FSX_XFLAGS 4
-#define FSX_NONBLOCK 8
+static int
+xfs_ioctl_setattr_xflags(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct fsxattr *fa)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ /* Can't change realtime flag if any extents are allocated. */
+ if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
+ XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & XFS_XFLAG_REALTIME))
+ return -EINVAL;
+
+ /* If realtime flag is set then must have realtime device */
+ if (fa->fsx_xflags & XFS_XFLAG_REALTIME) {
+ if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
+ (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
+ return -EINVAL;
+ }
+
+ /*
+ * Can't modify an immutable/append-only file unless
+ * we have appropriate permission.
+ */
+ if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
+ (fa->fsx_xflags & (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
+ !capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ xfs_set_diflags(ip, fa->fsx_xflags);
+ xfs_diflags_to_linux(ip);
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ XFS_STATS_INC(xs_ig_attrchg);
+ return 0;
+}
+
+/*
+ * Set up the transaction structure for the setattr operation, checking that we
+ * have permission to do so. On success, return a clean transaction and the
+ * inode locked exclusively ready for further operation specific checks. On
+ * failure, return an error without modifying or locking the inode.
+ */
+static struct xfs_trans *
+xfs_ioctl_setattr_get_trans(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ return ERR_PTR(-EROFS);
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return ERR_PTR(-EIO);
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+ if (error)
+ goto out_cancel;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ /*
+ * CAP_FOWNER overrides the following restrictions:
+ *
+ * The user ID of the calling process must be equal to the file owner
+ * ID, except in cases where the CAP_FSETID capability is applicable.
+ */
+ if (!inode_owner_or_capable(VFS_I(ip))) {
+ error = -EPERM;
+ goto out_cancel;
+ }
+
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
+
+ return tp;
+
+out_cancel:
+ xfs_trans_cancel(tp, 0);
+ return ERR_PTR(error);
+}
+
+/*
+ * extent size hint validation is somewhat cumbersome. The rules are:
+ *
+ * 1. extent size hint is only valid for directories and regular files
+ * 2. XFS_XFLAG_EXTSIZE is only valid for regular files
+ * 3. XFS_XFLAG_EXTSZINHERIT is only valid for directories.
+ * 4. can only be changed on regular files if no extents are allocated
+ * 5. can be changed on directories at any time
+ * 6. extsize hint of 0 turns off hints, clears inode flags.
+ * 7. Extent size must be a multiple of the appropriate block size.
+ * 8. for non-realtime files, the extent size hint must be limited
+ * to half the AG size to avoid alignment extending the extent beyond the
+ * limits of the AG.
+ */
+static int
+xfs_ioctl_setattr_check_extsize(
+ struct xfs_inode *ip,
+ struct fsxattr *fa)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ if ((fa->fsx_xflags & XFS_XFLAG_EXTSIZE) && !S_ISREG(ip->i_d.di_mode))
+ return -EINVAL;
+
+ if ((fa->fsx_xflags & XFS_XFLAG_EXTSZINHERIT) &&
+ !S_ISDIR(ip->i_d.di_mode))
+ return -EINVAL;
+
+ if (S_ISREG(ip->i_d.di_mode) && ip->i_d.di_nextents &&
+ ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
+ return -EINVAL;
+
+ if (fa->fsx_extsize != 0) {
+ xfs_extlen_t size;
+ xfs_fsblock_t extsize_fsb;
+
+ extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
+ if (extsize_fsb > MAXEXTLEN)
+ return -EINVAL;
+
+ if (XFS_IS_REALTIME_INODE(ip) ||
+ (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
+ size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
+ } else {
+ size = mp->m_sb.sb_blocksize;
+ if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
+ return -EINVAL;
+ }
+
+ if (fa->fsx_extsize % size)
+ return -EINVAL;
+ } else
+ fa->fsx_xflags &= ~(XFS_XFLAG_EXTSIZE | XFS_XFLAG_EXTSZINHERIT);
+
+ return 0;
+}
+
+static int
+xfs_ioctl_setattr_check_projid(
+ struct xfs_inode *ip,
+ struct fsxattr *fa)
+{
+ /* Disallow 32bit project ids if projid32bit feature is not enabled. */
+ if (fa->fsx_projid > (__uint16_t)-1 &&
+ !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
+ return -EINVAL;
+
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (xfs_get_projid(ip) != fa->fsx_projid)
+ return -EINVAL;
+ if ((fa->fsx_xflags & XFS_XFLAG_PROJINHERIT) !=
+ (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
+ return -EINVAL;
+
+ return 0;
+}
STATIC int
xfs_ioctl_setattr(
xfs_inode_t *ip,
- struct fsxattr *fa,
- int mask)
+ struct fsxattr *fa)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- unsigned int lock_flags = 0;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *pdqp = NULL;
struct xfs_dquot *olddquot = NULL;
@@ -1034,17 +1170,9 @@ xfs_ioctl_setattr(
trace_xfs_ioctl_setattr(ip);
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return -EROFS;
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- /*
- * Disallow 32bit project ids when projid32bit feature is not enabled.
- */
- if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
- !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
- return -EINVAL;
+ code = xfs_ioctl_setattr_check_projid(ip, fa);
+ if (code)
+ return code;
/*
* If disk quotas is on, we make sure that the dquots do exist on disk,
@@ -1054,7 +1182,7 @@ xfs_ioctl_setattr(
* If the IDs do change before we take the ilock, we're covered
* because the i_*dquot fields will get updated anyway.
*/
- if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
+ if (XFS_IS_QUOTA_ON(mp)) {
code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
ip->i_d.di_gid, fa->fsx_projid,
XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
@@ -1062,175 +1190,49 @@ xfs_ioctl_setattr(
return code;
}
- /*
- * For the other attributes, we acquire the inode lock and
- * first do an error checking pass.
- */
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- code = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
- if (code)
- goto error_return;
-
- lock_flags = XFS_ILOCK_EXCL;
- xfs_ilock(ip, lock_flags);
-
- /*
- * CAP_FOWNER overrides the following restrictions:
- *
- * The user ID of the calling process must be equal
- * to the file owner ID, except in cases where the
- * CAP_FSETID capability is applicable.
- */
- if (!inode_owner_or_capable(VFS_I(ip))) {
- code = -EPERM;
- goto error_return;
- }
-
- /*
- * Do a quota reservation only if projid is actually going to change.
- * Only allow changing of projid from init_user_ns since it is a
- * non user namespace aware identifier.
- */
- if (mask & FSX_PROJID) {
- if (current_user_ns() != &init_user_ns) {
- code = -EINVAL;
- goto error_return;
- }
-
- if (XFS_IS_QUOTA_RUNNING(mp) &&
- XFS_IS_PQUOTA_ON(mp) &&
- xfs_get_projid(ip) != fa->fsx_projid) {
- ASSERT(tp);
- code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL,
- pdqp, capable(CAP_FOWNER) ?
- XFS_QMOPT_FORCE_RES : 0);
- if (code) /* out of quota */
- goto error_return;
- }
+ tp = xfs_ioctl_setattr_get_trans(ip);
+ if (IS_ERR(tp)) {
+ code = PTR_ERR(tp);
+ goto error_free_dquots;
}
- if (mask & FSX_EXTSIZE) {
- /*
- * Can't change extent size if any extents are allocated.
- */
- if (ip->i_d.di_nextents &&
- ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
- fa->fsx_extsize)) {
- code = -EINVAL; /* EFBIG? */
- goto error_return;
- }
- /*
- * Extent size must be a multiple of the appropriate block
- * size, if set at all. It must also be smaller than the
- * maximum extent size supported by the filesystem.
- *
- * Also, for non-realtime files, limit the extent size hint to
- * half the size of the AGs in the filesystem so alignment
- * doesn't result in extents larger than an AG.
- */
- if (fa->fsx_extsize != 0) {
- xfs_extlen_t size;
- xfs_fsblock_t extsize_fsb;
-
- extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
- if (extsize_fsb > MAXEXTLEN) {
- code = -EINVAL;
- goto error_return;
- }
-
- if (XFS_IS_REALTIME_INODE(ip) ||
- ((mask & FSX_XFLAGS) &&
- (fa->fsx_xflags & XFS_XFLAG_REALTIME))) {
- size = mp->m_sb.sb_rextsize <<
- mp->m_sb.sb_blocklog;
- } else {
- size = mp->m_sb.sb_blocksize;
- if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
- code = -EINVAL;
- goto error_return;
- }
- }
-
- if (fa->fsx_extsize % size) {
- code = -EINVAL;
- goto error_return;
- }
- }
+ if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
+ xfs_get_projid(ip) != fa->fsx_projid) {
+ code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
+ capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
+ if (code) /* out of quota */
+ goto error_trans_cancel;
}
+ code = xfs_ioctl_setattr_check_extsize(ip, fa);
+ if (code)
+ goto error_trans_cancel;
- if (mask & FSX_XFLAGS) {
- /*
- * Can't change realtime flag if any extents are allocated.
- */
- if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
- (XFS_IS_REALTIME_INODE(ip)) !=
- (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
- code = -EINVAL; /* EFBIG? */
- goto error_return;
- }
-
- /*
- * If realtime flag is set then must have realtime data.
- */
- if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
- if ((mp->m_sb.sb_rblocks == 0) ||
- (mp->m_sb.sb_rextsize == 0) ||
- (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
- code = -EINVAL;
- goto error_return;
- }
- }
-
- /*
- * Can't modify an immutable/append-only file unless
- * we have appropriate permission.
- */
- if ((ip->i_d.di_flags &
- (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
- (fa->fsx_xflags &
- (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
- !capable(CAP_LINUX_IMMUTABLE)) {
- code = -EPERM;
- goto error_return;
- }
- }
-
- xfs_trans_ijoin(tp, ip, 0);
+ code = xfs_ioctl_setattr_xflags(tp, ip, fa);
+ if (code)
+ goto error_trans_cancel;
/*
- * Change file ownership. Must be the owner or privileged.
+ * Change file ownership. Must be the owner or privileged. CAP_FSETID
+ * overrides the following restrictions:
+ *
+ * The set-user-ID and set-group-ID bits of a file will be cleared upon
+ * successful return from chown()
*/
- if (mask & FSX_PROJID) {
- /*
- * CAP_FSETID overrides the following restrictions:
- *
- * The set-user-ID and set-group-ID bits of a file will be
- * cleared upon successful return from chown()
- */
- if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
- !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
- ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
-
- /*
- * Change the ownerships and register quota modifications
- * in the transaction.
- */
- if (xfs_get_projid(ip) != fa->fsx_projid) {
- if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
- olddquot = xfs_qm_vop_chown(tp, ip,
- &ip->i_pdquot, pdqp);
- }
- ASSERT(ip->i_d.di_version > 1);
- xfs_set_projid(ip, fa->fsx_projid);
- }
- }
+ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+ !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
+ ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
- if (mask & FSX_XFLAGS) {
- xfs_set_diflags(ip, fa->fsx_xflags);
- xfs_diflags_to_linux(ip);
+ /* Change the ownerships and register project quota modifications */
+ if (xfs_get_projid(ip) != fa->fsx_projid) {
+ if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
+ olddquot = xfs_qm_vop_chown(tp, ip,
+ &ip->i_pdquot, pdqp);
+ }
+ ASSERT(ip->i_d.di_version > 1);
+ xfs_set_projid(ip, fa->fsx_projid);
}
/*
@@ -1238,34 +1240,12 @@ xfs_ioctl_setattr(
* extent size hint should be set on the inode. If no extent size flags
* are set on the inode then unconditionally clear the extent size hint.
*/
- if (mask & FSX_EXTSIZE) {
- int extsize = 0;
-
- if (ip->i_d.di_flags &
- (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
- extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
- ip->i_d.di_extsize = extsize;
- }
-
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- XFS_STATS_INC(xs_ig_attrchg);
+ if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
+ ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
+ else
+ ip->i_d.di_extsize = 0;
- /*
- * If this is a synchronous mount, make sure that the
- * transaction goes to disk before returning to the user.
- * This is slightly sub-optimal in that truncates require
- * two sync transactions instead of one for wsync filesystems.
- * One for the truncate and one for the timestamps since we
- * don't want to change the timestamps unless we're sure the
- * truncate worked. Truncates are less than 1% of the laddis
- * mix so this probably isn't worth the trouble to optimize.
- */
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(tp);
code = xfs_trans_commit(tp, 0);
- xfs_iunlock(ip, lock_flags);
/*
* Release any dquot(s) the inode had kept before chown.
@@ -1276,12 +1256,11 @@ xfs_ioctl_setattr(
return code;
- error_return:
+error_trans_cancel:
+ xfs_trans_cancel(tp, 0);
+error_free_dquots:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(pdqp);
- xfs_trans_cancel(tp, 0);
- if (lock_flags)
- xfs_iunlock(ip, lock_flags);
return code;
}
@@ -1292,20 +1271,15 @@ xfs_ioc_fssetxattr(
void __user *arg)
{
struct fsxattr fa;
- unsigned int mask;
int error;
if (copy_from_user(&fa, arg, sizeof(fa)))
return -EFAULT;
- mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID;
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- mask |= FSX_NONBLOCK;
-
error = mnt_want_write_file(filp);
if (error)
return error;
- error = xfs_ioctl_setattr(ip, &fa, mask);
+ error = xfs_ioctl_setattr(ip, &fa);
mnt_drop_write_file(filp);
return error;
}
@@ -1325,14 +1299,14 @@ xfs_ioc_getxflags(
STATIC int
xfs_ioc_setxflags(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
struct file *filp,
void __user *arg)
{
+ struct xfs_trans *tp;
struct fsxattr fa;
unsigned int flags;
- unsigned int mask;
- int error;
+ int error;
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
@@ -1342,15 +1316,26 @@ xfs_ioc_setxflags(
FS_SYNC_FL))
return -EOPNOTSUPP;
- mask = FSX_XFLAGS;
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- mask |= FSX_NONBLOCK;
fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
error = mnt_want_write_file(filp);
if (error)
return error;
- error = xfs_ioctl_setattr(ip, &fa, mask);
+
+ tp = xfs_ioctl_setattr_get_trans(ip);
+ if (IS_ERR(tp)) {
+ error = PTR_ERR(tp);
+ goto out_drop_write;
+ }
+
+ error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ goto out_drop_write;
+ }
+
+ error = xfs_trans_commit(tp, 0);
+out_drop_write:
mnt_drop_write_file(filp);
return error;
}
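
The refactored xfs_ioctl_setattr() is ultimately driven by the FSSETXATTR ioctl. A hypothetical userspace sketch that exercises the new extent size hint checks (rules 2 and 7 above); it assumes the xfsprogs headers that provide XFS_IOC_FSGETXATTR and struct fsxattr:

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs.h>		/* XFS_IOC_FS[GS]ETXATTR, struct fsxattr */

	int main(void)
	{
		struct fsxattr	fa;
		int		fd = open("file", O_RDWR);

		if (fd < 0 || ioctl(fd, XFS_IOC_FSGETXATTR, &fa) < 0) {
			perror("FSGETXATTR");
			return 1;
		}
		fa.fsx_xflags |= XFS_XFLAG_EXTSIZE;	/* rule 2: regular file */
		fa.fsx_extsize = 1024 * 1024;		/* rule 7: blocksize multiple */
		if (ioctl(fd, XFS_IOC_FSSETXATTR, &fa) < 0) {
			perror("FSSETXATTR");
			return 1;
		}
		return 0;
	}
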
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index ec67728..bfc7c7c 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -423,7 +423,7 @@ xfs_compat_attrmulti_by_handle(
ops = memdup_user(compat_ptr(am_hreq.ops), size);
if (IS_ERR(ops)) {
- error = -PTR_ERR(ops);
+ error = PTR_ERR(ops);
goto out_dput;
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index c980e2a..ccb1dd0 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -802,7 +802,7 @@ int
xfs_iomap_write_unwritten(
xfs_inode_t *ip,
xfs_off_t offset,
- size_t count)
+ xfs_off_t count)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 411fbb8..8688e66 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -27,6 +27,6 @@ int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *);
int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
struct xfs_bmbt_irec *);
-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
#endif /* __XFS_IOMAP_H__*/
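
Widening the count from size_t to xfs_off_t matters on 32-bit builds, where size_t is only 32 bits: an unwritten-extent conversion for a direct I/O completion past 4 GiB would otherwise truncate. xfs_off_t is typedef'd to loff_t and is 64 bits everywhere; an illustrative compile-time check of that assumption (not part of the patch):

	BUILD_BUG_ON(sizeof(xfs_off_t) != 8);	/* always a 64-bit offset */
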
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c50311c..ce80eeb 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -380,18 +380,27 @@ xfs_vn_rename(
struct inode *odir,
struct dentry *odentry,
struct inode *ndir,
- struct dentry *ndentry)
+ struct dentry *ndentry,
+ unsigned int flags)
{
struct inode *new_inode = ndentry->d_inode;
+ int omode = 0;
struct xfs_name oname;
struct xfs_name nname;
- xfs_dentry_to_name(&oname, odentry, 0);
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ return -EINVAL;
+
+ /* if we are exchanging files, we need to set i_mode of both files */
+ if (flags & RENAME_EXCHANGE)
+ omode = ndentry->d_inode->i_mode;
+
+ xfs_dentry_to_name(&oname, odentry, omode);
xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode);
return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
- XFS_I(ndir), &nname, new_inode ?
- XFS_I(new_inode) : NULL);
+ XFS_I(ndir), &nname,
+ new_inode ? XFS_I(new_inode) : NULL, flags);
}
/*
@@ -1144,7 +1153,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
*/
.rmdir = xfs_vn_unlink,
.mknod = xfs_vn_mknod,
- .rename = xfs_vn_rename,
+ .rename2 = xfs_vn_rename,
.get_acl = xfs_get_acl,
.set_acl = xfs_set_acl,
.getattr = xfs_vn_getattr,
@@ -1172,7 +1181,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
*/
.rmdir = xfs_vn_unlink,
.mknod = xfs_vn_mknod,
- .rename = xfs_vn_rename,
+ .rename2 = xfs_vn_rename,
.get_acl = xfs_get_acl,
.set_acl = xfs_set_acl,
.getattr = xfs_vn_getattr,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e408bf5..bcc7cfa 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -33,6 +33,7 @@
#include "xfs_fsops.h"
#include "xfs_cksum.h"
#include "xfs_sysfs.h"
+#include "xfs_sb.h"
kmem_zone_t *xfs_log_ticket_zone;
@@ -1290,9 +1291,20 @@ xfs_log_worker(
struct xfs_mount *mp = log->l_mp;
/* dgc: errors ignored - not fatal and nowhere to report them */
- if (xfs_log_need_covered(mp))
- xfs_fs_log_dummy(mp);
- else
+ if (xfs_log_need_covered(mp)) {
+ /*
+ * Dump a transaction into the log that contains no real change.
+ * This is needed to stamp the current tail LSN into the log
+ * during the covering operation.
+ *
+ * We cannot use an inode here for this - that will push dirty
+ * state back up into the VFS and then periodic inode flushing
+ * will prevent log covering from making progress. Hence we
+ * synchronously log the superblock instead to ensure the
+ * superblock is immediately unpinned and can be written back.
+ */
+ xfs_sync_sb(mp, true);
+ } else
xfs_log_force(mp, 0);
/* start pushing all the metadata that is currently dirty */
@@ -1395,6 +1407,8 @@ xlog_alloc_log(
ASSERT(xfs_buf_islocked(bp));
xfs_buf_unlock(bp);
+ /* use high priority wq for log I/O completion */
+ bp->b_ioend_wq = mp->m_log_workqueue;
bp->b_iodone = xlog_iodone;
log->l_xbuf = bp;
@@ -1427,6 +1441,8 @@ xlog_alloc_log(
ASSERT(xfs_buf_islocked(bp));
xfs_buf_unlock(bp);
+ /* use high priority wq for log I/O completion */
+ bp->b_ioend_wq = mp->m_log_workqueue;
bp->b_iodone = xlog_iodone;
iclog->ic_bp = bp;
iclog->ic_data = bp->b_addr;
@@ -1806,8 +1822,6 @@ xlog_sync(
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_SYNCIO;
- /* use high priority completion wq */
- bp->b_ioend_wq = log->l_mp->m_log_workqueue;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
bp->b_flags |= XBF_FUA;
@@ -1856,8 +1870,6 @@ xlog_sync(
bp->b_flags |= XBF_SYNCIO;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
bp->b_flags |= XBF_FUA;
- /* use high priority completion wq */
- bp->b_ioend_wq = log->l_mp->m_log_workqueue;
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
@@ -2027,7 +2039,7 @@ xlog_print_tic_res(
" total reg = %u bytes (o/flow = %u bytes)\n"
" ophdrs = %u (ophdr space = %u bytes)\n"
" ophdr + reg = %u bytes\n"
- " num regions = %u\n",
+ " num regions = %u",
((ticket->t_trans_type <= 0 ||
ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
"bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d3d3883..4fa80e6 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -408,11 +408,11 @@ xfs_update_alignment(xfs_mount_t *mp)
if (xfs_sb_version_hasdalign(sbp)) {
if (sbp->sb_unit != mp->m_dalign) {
sbp->sb_unit = mp->m_dalign;
- mp->m_update_flags |= XFS_SB_UNIT;
+ mp->m_update_sb = true;
}
if (sbp->sb_width != mp->m_swidth) {
sbp->sb_width = mp->m_swidth;
- mp->m_update_flags |= XFS_SB_WIDTH;
+ mp->m_update_sb = true;
}
} else {
xfs_warn(mp,
@@ -583,38 +583,19 @@ int
xfs_mount_reset_sbqflags(
struct xfs_mount *mp)
{
- int error;
- struct xfs_trans *tp;
-
mp->m_qflags = 0;
- /*
- * It is OK to look at sb_qflags here in mount path,
- * without m_sb_lock.
- */
+ /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
if (mp->m_sb.sb_qflags == 0)
return 0;
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = 0;
spin_unlock(&mp->m_sb_lock);
- /*
- * If the fs is readonly, let the incore superblock run
- * with quotas off but don't flush the update out to disk
- */
- if (mp->m_flags & XFS_MOUNT_RDONLY)
+ if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
return 0;
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- xfs_alert(mp, "%s: Superblock update failed!", __func__);
- return error;
- }
-
- xfs_mod_sb(tp, XFS_SB_QFLAGS);
- return xfs_trans_commit(tp, 0);
+ return xfs_sync_sb(mp, false);
}
__uint64_t
@@ -659,26 +640,25 @@ xfs_mountfs(
xfs_sb_mount_common(mp, sbp);
/*
- * Check for a mismatched features2 values. Older kernels
- * read & wrote into the wrong sb offset for sb_features2
- * on some platforms due to xfs_sb_t not being 64bit size aligned
- * when sb_features2 was added, which made older superblock
- * reading/writing routines swap it as a 64-bit value.
+ * Check for mismatched features2 values. Older kernels read & wrote
+ * into the wrong sb offset for sb_features2 on some platforms due to
+ * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
+ * which made older superblock reading/writing routines swap it as a
+ * 64-bit value.
*
* For backwards compatibility, we make both slots equal.
*
- * If we detect a mismatched field, we OR the set bits into the
- * existing features2 field in case it has already been modified; we
- * don't want to lose any features. We then update the bad location
- * with the ORed value so that older kernels will see any features2
- * flags, and mark the two fields as needing updates once the
- * transaction subsystem is online.
+ * If we detect a mismatched field, we OR the set bits into the existing
+ * features2 field in case it has already been modified; we don't want
+ * to lose any features. We then update the bad location with the ORed
+ * value so that older kernels will see any features2 flags. The
+ * superblock writeback code ensures the new sb_features2 is copied to
+ * sb_bad_features2 before it is logged or written to disk.
*/
if (xfs_sb_has_mismatched_features2(sbp)) {
xfs_warn(mp, "correcting sb_features alignment problem");
sbp->sb_features2 |= sbp->sb_bad_features2;
- sbp->sb_bad_features2 = sbp->sb_features2;
- mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
+ mp->m_update_sb = true;
/*
* Re-check for ATTR2 in case it was found in bad_features2
@@ -692,17 +672,17 @@ xfs_mountfs(
if (xfs_sb_version_hasattr2(&mp->m_sb) &&
(mp->m_flags & XFS_MOUNT_NOATTR2)) {
xfs_sb_version_removeattr2(&mp->m_sb);
- mp->m_update_flags |= XFS_SB_FEATURES2;
+ mp->m_update_sb = true;
/* update sb_versionnum for the clearing of the morebits */
if (!sbp->sb_features2)
- mp->m_update_flags |= XFS_SB_VERSIONNUM;
+ mp->m_update_sb = true;
}
/* always use v2 inodes by default now */
if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
- mp->m_update_flags |= XFS_SB_VERSIONNUM;
+ mp->m_update_sb = true;
}
/*
@@ -895,8 +875,8 @@ xfs_mountfs(
* the next remount into writeable mode. Otherwise we would never
* perform the update e.g. for the root filesystem.
*/
- if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
- error = xfs_mount_log_sb(mp, mp->m_update_flags);
+ if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ error = xfs_sync_sb(mp, false);
if (error) {
xfs_warn(mp, "failed to write sb changes");
goto out_rtunmount;
@@ -1103,9 +1083,6 @@ xfs_fs_writable(
int
xfs_log_sbcount(xfs_mount_t *mp)
{
- xfs_trans_t *tp;
- int error;
-
/* allow this to proceed during the freeze sequence... */
if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
return 0;
@@ -1119,17 +1096,7 @@ xfs_log_sbcount(xfs_mount_t *mp)
if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
return 0;
- tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
- return error;
+ return xfs_sync_sb(mp, true);
}
/*
@@ -1423,34 +1390,6 @@ xfs_freesb(
}
/*
- * Used to log changes to the superblock unit and width fields which could
- * be altered by the mount options, as well as any potential sb_features2
- * fixup. Only the first superblock is updated.
- */
-int
-xfs_mount_log_sb(
- xfs_mount_t *mp,
- __int64_t fields)
-{
- xfs_trans_t *tp;
- int error;
-
- ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
- XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
- XFS_SB_VERSIONNUM));
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
- xfs_mod_sb(tp, fields);
- error = xfs_trans_commit(tp, 0);
- return error;
-}
-
-/*
* If the underlying (data/log/rt) device is readonly, there are some
* operations that cannot proceed.
*/
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 22ccf69..a5b2ff8 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -162,8 +162,7 @@ typedef struct xfs_mount {
struct delayed_work m_reclaim_work; /* background inode reclaim */
struct delayed_work m_eofblocks_work; /* background eof blocks
trimming */
- __int64_t m_update_flags; /* sb flags we need to update
- on the next remount,rw */
+ bool m_update_sb; /* sb needs update in mount */
int64_t m_low_space[XFS_LOWSP_MAX];
/* low free space thresholds */
struct xfs_kobj m_kobj;
@@ -378,7 +377,7 @@ extern void xfs_unmountfs(xfs_mount_t *);
extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
uint, int);
-extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
+extern int xfs_mount_log_sb(xfs_mount_t *);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int xfs_readsb(xfs_mount_t *, int);
extern void xfs_freesb(xfs_mount_t *);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 79fb19d..3e81862 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -714,7 +714,6 @@ STATIC int
xfs_qm_qino_alloc(
xfs_mount_t *mp,
xfs_inode_t **ip,
- __int64_t sbfields,
uint flags)
{
xfs_trans_t *tp;
@@ -777,11 +776,6 @@ xfs_qm_qino_alloc(
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
- ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
- (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
- XFS_SB_QFLAGS));
xfs_sb_version_addquota(&mp->m_sb);
mp->m_sb.sb_uquotino = NULLFSINO;
@@ -798,7 +792,7 @@ xfs_qm_qino_alloc(
else
mp->m_sb.sb_pquotino = (*ip)->i_ino;
spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, sbfields);
+ xfs_log_sb(tp);
if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
xfs_alert(mp, "%s failed (error %d)!", __func__, error);
@@ -1451,7 +1445,7 @@ xfs_qm_mount_quotas(
spin_unlock(&mp->m_sb_lock);
if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
- if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
+ if (xfs_sync_sb(mp, false)) {
/*
* We could only have been turning quotas off.
* We aren't in very good shape actually because
@@ -1482,7 +1476,6 @@ xfs_qm_init_quotainos(
struct xfs_inode *gip = NULL;
struct xfs_inode *pip = NULL;
int error;
- __int64_t sbflags = 0;
uint flags = 0;
ASSERT(mp->m_quotainfo);
@@ -1517,9 +1510,6 @@ xfs_qm_init_quotainos(
}
} else {
flags |= XFS_QMOPT_SBVERSION;
- sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
- XFS_SB_QFLAGS);
}
/*
@@ -1530,7 +1520,6 @@ xfs_qm_init_quotainos(
*/
if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
error = xfs_qm_qino_alloc(mp, &uip,
- sbflags | XFS_SB_UQUOTINO,
flags | XFS_QMOPT_UQUOTA);
if (error)
goto error_rele;
@@ -1539,7 +1528,6 @@ xfs_qm_init_quotainos(
}
if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
error = xfs_qm_qino_alloc(mp, &gip,
- sbflags | XFS_SB_GQUOTINO,
flags | XFS_QMOPT_GQUOTA);
if (error)
goto error_rele;
@@ -1548,7 +1536,6 @@ xfs_qm_init_quotainos(
}
if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
error = xfs_qm_qino_alloc(mp, &pip,
- sbflags | XFS_SB_PQUOTINO,
flags | XFS_QMOPT_PQUOTA);
if (error)
goto error_rele;
@@ -1587,32 +1574,6 @@ xfs_qm_dqfree_one(
xfs_qm_dqdestroy(dqp);
}
-/*
- * Start a transaction and write the incore superblock changes to
- * disk. flags parameter indicates which fields have changed.
- */
-int
-xfs_qm_write_sb_changes(
- xfs_mount_t *mp,
- __int64_t flags)
-{
- xfs_trans_t *tp;
- int error;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_mod_sb(tp, flags);
- error = xfs_trans_commit(tp, 0);
-
- return error;
-}
-
-
/* --------------- utility functions for vnodeops ---------------- */
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 3a07a93..0d4d359 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -157,7 +157,6 @@ struct xfs_dquot_acct {
#define XFS_QM_RTBWARNLIMIT 5
extern void xfs_qm_destroy_quotainfo(struct xfs_mount *);
-extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t);
/* dquot stuff */
extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
@@ -166,9 +165,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
/* quota ops */
extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
- uint, struct fs_disk_quota *);
+ uint, struct qc_dqblk *);
extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
- struct fs_disk_quota *);
+ struct qc_dqblk *);
extern int xfs_qm_scall_getqstat(struct xfs_mount *,
struct fs_quota_stat *);
extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
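
The quota syscall helpers now take the VFS-level struct qc_dqblk instead of fs_disk_quota. The crucial difference: fs_disk_quota counts space in 512-byte basic blocks (hence the old XFS_BB_TO_FSB conversions), while qc_dqblk expresses space limits in bytes, which is why the setqlim hunk below switches to XFS_B_TO_FSB. An excerpt of the structure as assumed from <linux/quota.h> of this era, remaining fields omitted:

	struct qc_dqblk {
		int	d_fieldmask;		/* QC_* mask of valid fields */
		u64	d_spc_hardlimit;	/* absolute space limit, bytes */
		u64	d_spc_softlimit;	/* preferred space limit, bytes */
		u64	d_ino_hardlimit;	/* maximum # allocated inodes */
		u64	d_ino_softlimit;	/* preferred inode limit */
		/* ... timers, warning counts, realtime-device limits ... */
	};
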
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 74fca68..9b965db4 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
uint);
STATIC uint xfs_qm_export_flags(uint);
-STATIC uint xfs_qm_export_qtype_flags(uint);
/*
* Turn off quota accounting and/or enforcement for all udquots and/or
@@ -92,8 +91,7 @@ xfs_qm_scall_quotaoff(
mutex_unlock(&q->qi_quotaofflock);
/* XXX what to do if error ? Revert back to old vals incore ? */
- error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
- return error;
+ return xfs_sync_sb(mp, false);
}
dqtype = 0;
@@ -314,7 +312,6 @@ xfs_qm_scall_quotaon(
{
int error;
uint qf;
- __int64_t sbflags;
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
@@ -322,30 +319,22 @@ xfs_qm_scall_quotaon(
*/
flags &= ~(XFS_ALL_QUOTA_ACCT);
- sbflags = 0;
-
if (flags == 0) {
xfs_debug(mp, "%s: zero flags, m_qflags=%x",
__func__, mp->m_qflags);
return -EINVAL;
}
- /* No fs can turn on quotas with a delayed effect */
- ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
-
/*
* Can't enforce without accounting. We check the superblock
* qflags here instead of m_qflags because rootfs can have
* quota acct on ondisk without m_qflags' knowing.
*/
- if (((flags & XFS_UQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
+ if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
(flags & XFS_UQUOTA_ENFD)) ||
- ((flags & XFS_GQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
+ ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
(flags & XFS_GQUOTA_ENFD)) ||
- ((flags & XFS_PQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
+ ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
(flags & XFS_PQUOTA_ENFD))) {
xfs_debug(mp,
"%s: Can't enforce without acct, flags=%x sbflags=%x",
@@ -370,11 +359,11 @@ xfs_qm_scall_quotaon(
/*
* There's nothing to change if it's the same.
*/
- if ((qf & flags) == flags && sbflags == 0)
+ if ((qf & flags) == flags)
return -EEXIST;
- sbflags |= XFS_SB_QFLAGS;
- if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
+ error = xfs_sync_sb(mp, false);
+ if (error)
return error;
/*
* If we aren't trying to switch on quota enforcement, we are done.
@@ -384,8 +373,7 @@ xfs_qm_scall_quotaon(
((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
(mp->m_qflags & XFS_PQUOTA_ACCT)) ||
((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
- (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
- (flags & XFS_ALL_QUOTA_ENFD) == 0)
+ (mp->m_qflags & XFS_GQUOTA_ACCT)))
return 0;
if (! XFS_IS_QUOTA_RUNNING(mp))
@@ -422,20 +410,12 @@ xfs_qm_scall_getqstat(
memset(out, 0, sizeof(fs_quota_stat_t));
out->qs_version = FS_QSTAT_VERSION;
- if (!xfs_sb_version_hasquota(&mp->m_sb)) {
- out->qs_uquota.qfs_ino = NULLFSINO;
- out->qs_gquota.qfs_ino = NULLFSINO;
- return 0;
- }
-
out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
(XFS_ALL_QUOTA_ACCT|
XFS_ALL_QUOTA_ENFD));
- if (q) {
- uip = q->qi_uquotaip;
- gip = q->qi_gquotaip;
- pip = q->qi_pquotaip;
- }
+ uip = q->qi_uquotaip;
+ gip = q->qi_gquotaip;
+ pip = q->qi_pquotaip;
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
0, 0, &uip) == 0)
@@ -481,14 +461,13 @@ xfs_qm_scall_getqstat(
if (temppqip)
IRELE(pip);
}
- if (q) {
- out->qs_incoredqs = q->qi_dquots;
- out->qs_btimelimit = q->qi_btimelimit;
- out->qs_itimelimit = q->qi_itimelimit;
- out->qs_rtbtimelimit = q->qi_rtbtimelimit;
- out->qs_bwarnlimit = q->qi_bwarnlimit;
- out->qs_iwarnlimit = q->qi_iwarnlimit;
- }
+ out->qs_incoredqs = q->qi_dquots;
+ out->qs_btimelimit = q->qi_btimelimit;
+ out->qs_itimelimit = q->qi_itimelimit;
+ out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+ out->qs_bwarnlimit = q->qi_bwarnlimit;
+ out->qs_iwarnlimit = q->qi_iwarnlimit;
+
return 0;
}
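Dropping the if (q) guards and the hasquota early return above assumes these scall helpers are no longer reachable while quota is off; a sketch of the boundary check, in the style of the xfs_quotaops.c helpers later in this patch, that would keep q = mp->m_quotainfo reliably non-NULL here:

	/* Sketch: reject quotactl requests before the scall layer, so
	 * mp->m_quotainfo is guaranteed to be set up below. */
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;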
@@ -509,13 +488,6 @@ xfs_qm_scall_getqstatv(
bool tempgqip = false;
bool temppqip = false;
- if (!xfs_sb_version_hasquota(&mp->m_sb)) {
- out->qs_uquota.qfs_ino = NULLFSINO;
- out->qs_gquota.qfs_ino = NULLFSINO;
- out->qs_pquota.qfs_ino = NULLFSINO;
- return 0;
- }
-
out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
(XFS_ALL_QUOTA_ACCT|
XFS_ALL_QUOTA_ENFD));
@@ -523,11 +495,9 @@ xfs_qm_scall_getqstatv(
out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;
- if (q) {
- uip = q->qi_uquotaip;
- gip = q->qi_gquotaip;
- pip = q->qi_pquotaip;
- }
+ uip = q->qi_uquotaip;
+ gip = q->qi_gquotaip;
+ pip = q->qi_pquotaip;
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
0, 0, &uip) == 0)
@@ -562,19 +532,18 @@ xfs_qm_scall_getqstatv(
if (temppqip)
IRELE(pip);
}
- if (q) {
- out->qs_incoredqs = q->qi_dquots;
- out->qs_btimelimit = q->qi_btimelimit;
- out->qs_itimelimit = q->qi_itimelimit;
- out->qs_rtbtimelimit = q->qi_rtbtimelimit;
- out->qs_bwarnlimit = q->qi_bwarnlimit;
- out->qs_iwarnlimit = q->qi_iwarnlimit;
- }
+ out->qs_incoredqs = q->qi_dquots;
+ out->qs_btimelimit = q->qi_btimelimit;
+ out->qs_itimelimit = q->qi_itimelimit;
+ out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+ out->qs_bwarnlimit = q->qi_bwarnlimit;
+ out->qs_iwarnlimit = q->qi_iwarnlimit;
+
return 0;
}
-#define XFS_DQ_MASK \
- (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+#define XFS_QC_MASK \
+ (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
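XFS_QC_MASK collects every QC_* bit setqlim can apply; bits outside it draw -EINVAL, and an empty mask is a no-op, as the checks in the next hunk show. A hedged sketch of a caller populating the request, using only names from this patch (space values are bytes under the new interface):

	/* Sketch: raise one user's space limits to 1 GiB soft, 2 GiB hard. */
	struct qc_dqblk newlim = {
		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
		.d_spc_softlimit = 1ULL << 30,
		.d_spc_hardlimit = 2ULL << 30,
	};
	error = xfs_qm_scall_setqlim(mp, id, XFS_DQ_USER, &newlim);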
/*
* Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +553,7 @@ xfs_qm_scall_setqlim(
struct xfs_mount *mp,
xfs_dqid_t id,
uint type,
- fs_disk_quota_t *newlim)
+ struct qc_dqblk *newlim)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_disk_dquot *ddq;
@@ -593,9 +562,9 @@ xfs_qm_scall_setqlim(
int error;
xfs_qcnt_t hard, soft;
- if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+ if (newlim->d_fieldmask & ~XFS_QC_MASK)
return -EINVAL;
- if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+ if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
return 0;
/*
@@ -633,11 +602,11 @@ xfs_qm_scall_setqlim(
/*
* Make sure that hardlimits are >= soft limits before changing.
*/
- hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+ hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
be64_to_cpu(ddq->d_blk_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+ soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
be64_to_cpu(ddq->d_blk_softlimit);
if (hard == 0 || hard >= soft) {
ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +619,11 @@ xfs_qm_scall_setqlim(
} else {
xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
}
- hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+ hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
be64_to_cpu(ddq->d_rtb_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+ soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
be64_to_cpu(ddq->d_rtb_softlimit);
if (hard == 0 || hard >= soft) {
ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +636,10 @@ xfs_qm_scall_setqlim(
xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
}
- hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+ hard = (newlim->d_fieldmask & QC_INO_HARD) ?
(xfs_qcnt_t) newlim->d_ino_hardlimit :
be64_to_cpu(ddq->d_ino_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+ soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
(xfs_qcnt_t) newlim->d_ino_softlimit :
be64_to_cpu(ddq->d_ino_softlimit);
if (hard == 0 || hard >= soft) {
@@ -687,12 +656,12 @@ xfs_qm_scall_setqlim(
/*
* Update warnings counter(s) if requested
*/
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+ if (newlim->d_fieldmask & QC_SPC_WARNS)
+ ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+ if (newlim->d_fieldmask & QC_INO_WARNS)
+ ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+ if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+ ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
if (id == 0) {
/*
@@ -702,24 +671,24 @@ xfs_qm_scall_setqlim(
* soft and hard limit values (already done, above), and
* for warnings.
*/
- if (newlim->d_fieldmask & FS_DQ_BTIMER) {
- q->qi_btimelimit = newlim->d_btimer;
- ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+ if (newlim->d_fieldmask & QC_SPC_TIMER) {
+ q->qi_btimelimit = newlim->d_spc_timer;
+ ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
}
- if (newlim->d_fieldmask & FS_DQ_ITIMER) {
- q->qi_itimelimit = newlim->d_itimer;
- ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+ if (newlim->d_fieldmask & QC_INO_TIMER) {
+ q->qi_itimelimit = newlim->d_ino_timer;
+ ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
}
- if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
- q->qi_rtbtimelimit = newlim->d_rtbtimer;
- ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+ if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+ q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+ ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
}
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- q->qi_bwarnlimit = newlim->d_bwarns;
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- q->qi_iwarnlimit = newlim->d_iwarns;
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+ if (newlim->d_fieldmask & QC_SPC_WARNS)
+ q->qi_bwarnlimit = newlim->d_spc_warns;
+ if (newlim->d_fieldmask & QC_INO_WARNS)
+ q->qi_iwarnlimit = newlim->d_ino_warns;
+ if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+ q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
} else {
/*
* If the user is now over quota, start the timelimit.
@@ -801,7 +770,7 @@ xfs_qm_log_quotaoff(
mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, XFS_SB_QFLAGS);
+ xfs_log_sb(tp);
/*
* We have to make sure that the transaction is secure on disk before we
@@ -824,7 +793,7 @@ xfs_qm_scall_getquota(
struct xfs_mount *mp,
xfs_dqid_t id,
uint type,
- struct fs_disk_quota *dst)
+ struct qc_dqblk *dst)
{
struct xfs_dquot *dqp;
int error;
@@ -848,28 +817,25 @@ xfs_qm_scall_getquota(
}
memset(dst, 0, sizeof(*dst));
- dst->d_version = FS_DQUOT_VERSION;
- dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
- dst->d_id = be32_to_cpu(dqp->q_core.d_id);
- dst->d_blk_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
- dst->d_blk_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+ dst->d_spc_hardlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+ dst->d_spc_softlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
- dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
- dst->d_icount = dqp->q_res_icount;
- dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
- dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
- dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
- dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
- dst->d_rtb_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
- dst->d_rtb_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
- dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
- dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
- dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+ dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+ dst->d_ino_count = dqp->q_res_icount;
+ dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+ dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+ dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+ dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+ dst->d_rt_spc_hardlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+ dst->d_rt_spc_softlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+ dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+ dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+ dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
/*
* Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +848,23 @@ xfs_qm_scall_getquota(
dqp->q_core.d_flags == XFS_DQ_GROUP) ||
(!XFS_IS_PQUOTA_ENFORCED(mp) &&
dqp->q_core.d_flags == XFS_DQ_PROJ)) {
- dst->d_btimer = 0;
- dst->d_itimer = 0;
- dst->d_rtbtimer = 0;
+ dst->d_spc_timer = 0;
+ dst->d_ino_timer = 0;
+ dst->d_rt_spc_timer = 0;
}
#ifdef DEBUG
- if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
- (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
- (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
- dst->d_id != 0) {
- if ((dst->d_bcount > dst->d_blk_softlimit) &&
- (dst->d_blk_softlimit > 0)) {
- ASSERT(dst->d_btimer != 0);
+ if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+ (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+ (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+ id != 0) {
+ if ((dst->d_space > dst->d_spc_softlimit) &&
+ (dst->d_spc_softlimit > 0)) {
+ ASSERT(dst->d_spc_timer != 0);
}
- if ((dst->d_icount > dst->d_ino_softlimit) &&
+ if ((dst->d_ino_count > dst->d_ino_softlimit) &&
(dst->d_ino_softlimit > 0)) {
- ASSERT(dst->d_itimer != 0);
+ ASSERT(dst->d_ino_timer != 0);
}
}
#endif
@@ -908,26 +874,6 @@ out_put:
}
STATIC uint
-xfs_qm_export_qtype_flags(
- uint flags)
-{
- /*
- * Can't be more than one, or none.
- */
- ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
- (FS_PROJ_QUOTA | FS_USER_QUOTA));
- ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
- (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
- ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
- (FS_USER_QUOTA | FS_GROUP_QUOTA));
- ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
-
- return (flags & XFS_DQ_USER) ?
- FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
- FS_PROJ_QUOTA : FS_GROUP_QUOTA;
-}
-
-STATIC uint
xfs_qm_export_flags(
uint flags)
{
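Two things fall out of the conversion above: xfs_qm_export_qtype_flags() is dead because qc_dqblk carries no d_flags/d_id — the quota type and id now travel as arguments, as the reworked DEBUG block shows — and the space unit changes from 512-byte basic blocks (XFS_BB_TO_FSB/XFS_FSB_TO_BB) to plain bytes (XFS_B_TO_FSB/XFS_FSB_TO_B). A worked example of the unit change, assuming a 4096-byte filesystem block:

	/* Old interface, limit arrives in 512-byte basic blocks:
	 *	XFS_BB_TO_FSB(mp, 8192)   = 8192 * 512 / 4096 = 1024 FSBs
	 * New interface, the same limit arrives in bytes:
	 *	XFS_B_TO_FSB(mp, 4194304) = 4194304 / 4096    = 1024 FSBs
	 * Both describe a 4 MiB limit; only the caller-visible unit
	 * changed, not the on-disk format. */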
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 7542bbe..6923905 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -64,19 +64,10 @@ xfs_fs_get_xstatev(
return xfs_qm_scall_getqstatv(mp, fqs);
}
-STATIC int
-xfs_fs_set_xstate(
- struct super_block *sb,
- unsigned int uflags,
- int op)
+static unsigned int
+xfs_quota_flags(unsigned int uflags)
{
- struct xfs_mount *mp = XFS_M(sb);
- unsigned int flags = 0;
-
- if (sb->s_flags & MS_RDONLY)
- return -EROFS;
- if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
- return -ENOSYS;
+ unsigned int flags = 0;
if (uflags & FS_QUOTA_UDQ_ACCT)
flags |= XFS_UQUOTA_ACCT;
@@ -91,16 +82,39 @@ xfs_fs_set_xstate(
if (uflags & FS_QUOTA_PDQ_ENFD)
flags |= XFS_PQUOTA_ENFD;
- switch (op) {
- case Q_XQUOTAON:
- return xfs_qm_scall_quotaon(mp, flags);
- case Q_XQUOTAOFF:
- if (!XFS_IS_QUOTA_ON(mp))
- return -EINVAL;
- return xfs_qm_scall_quotaoff(mp, flags);
- }
+ return flags;
+}
+
+STATIC int
+xfs_quota_enable(
+ struct super_block *sb,
+ unsigned int uflags)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+
+ if (sb->s_flags & MS_RDONLY)
+ return -EROFS;
+ if (!XFS_IS_QUOTA_RUNNING(mp))
+ return -ENOSYS;
+
+ return xfs_qm_scall_quotaon(mp, xfs_quota_flags(uflags));
+}
+
+STATIC int
+xfs_quota_disable(
+ struct super_block *sb,
+ unsigned int uflags)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+
+ if (sb->s_flags & MS_RDONLY)
+ return -EROFS;
+ if (!XFS_IS_QUOTA_RUNNING(mp))
+ return -ENOSYS;
+ if (!XFS_IS_QUOTA_ON(mp))
+ return -EINVAL;
- return -EINVAL;
+ return xfs_qm_scall_quotaoff(mp, xfs_quota_flags(uflags));
}
STATIC int
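Where set_xstate multiplexed enable and disable on an op code, the two helpers above split the paths and share xfs_quota_flags() for the FS_QUOTA_* to XFS_*QUOTA_* translation. A sketch of how the generic quota layer would reach the new callback through the ops table wired up below (the example_* function is hypothetical):

	/* Sketch: turn on user quota accounting and enforcement via the
	 * new quota_enable operation; example_* is illustrative only. */
	static int example_enable_uquota(struct super_block *sb)
	{
		return sb->s_qcop->quota_enable(sb,
				FS_QUOTA_UDQ_ACCT | FS_QUOTA_UDQ_ENFD);
	}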
@@ -131,7 +145,7 @@ STATIC int
xfs_fs_get_dqblk(
struct super_block *sb,
struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *qdq)
{
struct xfs_mount *mp = XFS_M(sb);
@@ -141,14 +155,14 @@ xfs_fs_get_dqblk(
return -ESRCH;
return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
- xfs_quota_type(qid.type), fdq);
+ xfs_quota_type(qid.type), qdq);
}
STATIC int
xfs_fs_set_dqblk(
struct super_block *sb,
struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *qdq)
{
struct xfs_mount *mp = XFS_M(sb);
@@ -160,13 +174,14 @@ xfs_fs_set_dqblk(
return -ESRCH;
return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
- xfs_quota_type(qid.type), fdq);
+ xfs_quota_type(qid.type), qdq);
}
const struct quotactl_ops xfs_quotactl_operations = {
.get_xstatev = xfs_fs_get_xstatev,
.get_xstate = xfs_fs_get_xstate,
- .set_xstate = xfs_fs_set_xstate,
+ .quota_enable = xfs_quota_enable,
+ .quota_disable = xfs_quota_disable,
.rm_xquota = xfs_fs_rm_xquota,
.get_dqblk = xfs_fs_get_dqblk,
.set_dqblk = xfs_fs_set_dqblk,
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 19cbda1..f2449fd 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -685,7 +685,7 @@ xfs_blkdev_get(
mp);
if (IS_ERR(*bdevp)) {
error = PTR_ERR(*bdevp);
- xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
+ xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
}
return error;
@@ -1111,6 +1111,11 @@ xfs_fs_statfs(
statp->f_files,
mp->m_maxicount);
+ /* If sb_icount overshot maxicount, report actual allocation */
+ statp->f_files = max_t(typeof(statp->f_files),
+ statp->f_files,
+ sbp->sb_icount);
+
/* make sure statp->f_ffree does not underflow */
ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
statp->f_ffree = max_t(__int64_t, ffree, 0);
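The added clamp covers sb_icount having overshot m_maxicount (possible when the allowed inode percentage shrinks after inodes were allocated), which would otherwise drive the f_ffree arithmetic negative while f_files under-reports reality. A worked example with hypothetical numbers:

	/* Say m_maxicount = 1000, sb_icount = 1200, sb_ifree = 50.
	 * Without the clamp: f_files = 1000 and
	 *	ffree = 1000 - (1200 - 50) = -150  -> forced to 0,
	 * with f_files still under-reporting actual allocation.
	 * With the clamp:    f_files = max(1000, 1200) = 1200 and
	 *	ffree = 1200 - (1200 - 50) = 50, matching reality. */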
@@ -1257,13 +1262,13 @@ xfs_fs_remount(
* If this is the first remount to writeable state we
* might have some superblock changes to update.
*/
- if (mp->m_update_flags) {
- error = xfs_mount_log_sb(mp, mp->m_update_flags);
+ if (mp->m_update_sb) {
+ error = xfs_sync_sb(mp, false);
if (error) {
xfs_warn(mp, "failed to write sb changes");
return error;
}
- mp->m_update_flags = 0;
+ mp->m_update_sb = false;
}
/*
@@ -1293,8 +1298,9 @@ xfs_fs_remount(
/*
* Second stage of a freeze. The data is already frozen so we only
- * need to take care of the metadata. Once that's done write a dummy
- * record to dirty the log in case of a crash while frozen.
+ * need to take care of the metadata. Once that's done, sync the superblock
+ * to the log to dirty it in case of a crash while frozen. This ensures
+ * that we will recover the unlinked inode lists on the next mount.
*/
STATIC int
xfs_fs_freeze(
@@ -1304,7 +1310,7 @@ xfs_fs_freeze(
xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
- return xfs_fs_log_dummy(mp);
+ return xfs_sync_sb(mp, true);
}
STATIC int
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index 1743b9f..a0c8067 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -149,24 +149,6 @@ static struct ctl_table xfs_table[] = {
.extra2 = &xfs_params.inherit_noatim.max
},
{
- .procname = "xfsbufd_centisecs",
- .data = &xfs_params.xfs_buf_timer.val,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &xfs_params.xfs_buf_timer.min,
- .extra2 = &xfs_params.xfs_buf_timer.max
- },
- {
- .procname = "age_buffer_centisecs",
- .data = &xfs_params.xfs_buf_age.val,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &xfs_params.xfs_buf_age.min,
- .extra2 = &xfs_params.xfs_buf_age.max
- },
- {
.procname = "inherit_nosymlinks",
.data = &xfs_params.inherit_nosym.val,
.maxlen = sizeof(int),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index fa3135b..eb90cd5 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -472,6 +472,7 @@ xfs_trans_apply_sb_deltas(
whole = 1;
}
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
if (whole)
/*
* Log the whole thing, the fields are noncontiguous.
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 0a4d4ab..7579841 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -327,9 +327,10 @@ xfs_trans_read_buf_map(
return -EIO;
}
- if (tp)
+ if (tp) {
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_read_buf(bp->b_fspriv);
+ trace_xfs_trans_read_buf(bp->b_fspriv);
+ }
*bpp = bp;
return 0;
OpenPOWER on IntegriCloud