author     J. Bruce Fields <bfields@citi.umich.edu>  2009-08-21 11:27:29 -0400
committer  J. Bruce Fields <bfields@citi.umich.edu>  2009-08-21 11:27:29 -0400
commit     e9dc122166b8d863d3057a66ada04838e5548e52 (patch)
tree       749e15bf719b64bf9113db7acd8e043d9742cb26 /fs
parent     560ab42ef923aaf2e4347315bdfcc74b2708972c (diff)
parent     405d8f8b1d936414da2093d4149ff790ff3f84a5 (diff)
download   op-kernel-dev-e9dc122166b8d863d3057a66ada04838e5548e52.zip
           op-kernel-dev-e9dc122166b8d863d3057a66ada04838e5548e52.tar.gz
Merge branch 'nfs-for-2.6.32' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6 into for-2.6.32-incoming
Conflicts: net/sunrpc/cache.c
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_addr.c | 2
-rw-r--r--  fs/Kconfig | 27
-rw-r--r--  fs/adfs/super.c | 1
-rw-r--r--  fs/afs/dir.c | 2
-rw-r--r--  fs/afs/flock.c | 1
-rw-r--r--  fs/afs/mntpt.c | 1
-rw-r--r--  fs/afs/super.c | 1
-rw-r--r--  fs/aio.c | 24
-rw-r--r--  fs/autofs4/dev-ioctl.c | 1
-rw-r--r--  fs/bfs/dir.c | 1
-rw-r--r--  fs/bfs/file.c | 1
-rw-r--r--  fs/binfmt_elf.c | 9
-rw-r--r--  fs/binfmt_flat.c | 17
-rw-r--r--  fs/bio-integrity.c | 170
-rw-r--r--  fs/bio.c | 33
-rw-r--r--  fs/block_dev.c | 10
-rw-r--r--  fs/btrfs/async-thread.c | 6
-rw-r--r--  fs/btrfs/compression.c | 1
-rw-r--r--  fs/btrfs/ctree.c | 121
-rw-r--r--  fs/btrfs/ctree.h | 30
-rw-r--r--  fs/btrfs/disk-io.c | 15
-rw-r--r--  fs/btrfs/extent-tree.c | 1096
-rw-r--r--  fs/btrfs/file.c | 6
-rw-r--r--  fs/btrfs/free-space-cache.c | 1058
-rw-r--r--  fs/btrfs/free-space-cache.h | 8
-rw-r--r--  fs/btrfs/inode.c | 31
-rw-r--r--  fs/btrfs/ioctl.c | 7
-rw-r--r--  fs/btrfs/print-tree.c | 6
-rw-r--r--  fs/btrfs/relocation.c | 17
-rw-r--r--  fs/btrfs/super.c | 1
-rw-r--r--  fs/btrfs/transaction.c | 60
-rw-r--r--  fs/btrfs/transaction.h | 1
-rw-r--r--  fs/btrfs/tree-log.c | 2
-rw-r--r--  fs/btrfs/volumes.c | 46
-rw-r--r--  fs/btrfs/zlib.c | 6
-rw-r--r--  fs/char_dev.c | 1
-rw-r--r--  fs/cifs/CHANGES | 13
-rw-r--r--  fs/cifs/README | 25
-rw-r--r--  fs/cifs/asn1.c | 55
-rw-r--r--  fs/cifs/cifs_debug.c | 8
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 12
-rw-r--r--  fs/cifs/cifs_spnego.c | 9
-rw-r--r--  fs/cifs/cifs_unicode.c | 2
-rw-r--r--  fs/cifs/cifsacl.c | 26
-rw-r--r--  fs/cifs/cifsfs.c | 162
-rw-r--r--  fs/cifs/cifsfs.h | 15
-rw-r--r--  fs/cifs/cifsglob.h | 32
-rw-r--r--  fs/cifs/cifspdu.h | 14
-rw-r--r--  fs/cifs/cifsproto.h | 23
-rw-r--r--  fs/cifs/cifssmb.c | 152
-rw-r--r--  fs/cifs/connect.c | 103
-rw-r--r--  fs/cifs/dir.c | 52
-rw-r--r--  fs/cifs/dns_resolve.c | 25
-rw-r--r--  fs/cifs/file.c | 40
-rw-r--r--  fs/cifs/inode.c | 780
-rw-r--r--  fs/cifs/link.c | 3
-rw-r--r--  fs/cifs/netmisc.c | 56
-rw-r--r--  fs/cifs/readdir.c | 505
-rw-r--r--  fs/cifs/sess.c | 2
-rw-r--r--  fs/cifs/xattr.c | 12
-rw-r--r--  fs/compat.c | 5
-rw-r--r--  fs/compat_ioctl.c | 2
-rw-r--r--  fs/dlm/lock.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 4
-rw-r--r--  fs/dlm/plock.c | 17
-rw-r--r--  fs/ecryptfs/keystore.c | 13
-rw-r--r--  fs/eventfd.c | 122
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/exofs/common.h | 4
-rw-r--r--  fs/exofs/dir.c | 4
-rw-r--r--  fs/exofs/exofs.h | 7
-rw-r--r--  fs/exofs/file.c | 21
-rw-r--r--  fs/exofs/inode.c | 7
-rw-r--r--  fs/exofs/namei.c | 4
-rw-r--r--  fs/exofs/osd.c | 4
-rw-r--r--  fs/exofs/super.c | 7
-rw-r--r--  fs/exofs/symlink.c | 4
-rw-r--r--  fs/ext2/ioctl.c | 1
-rw-r--r--  fs/ext2/namei.c | 12
-rw-r--r--  fs/ext3/dir.c | 3
-rw-r--r--  fs/ext3/inode.c | 32
-rw-r--r--  fs/ext4/ext4.h | 14
-rw-r--r--  fs/ext4/ext4_jbd2.c | 4
-rw-r--r--  fs/ext4/ext4_jbd2.h | 6
-rw-r--r--  fs/ext4/extents.c | 1
-rw-r--r--  fs/ext4/ialloc.c | 2
-rw-r--r--  fs/ext4/inode.c | 384
-rw-r--r--  fs/ext4/ioctl.c | 21
-rw-r--r--  fs/ext4/mballoc.c | 50
-rw-r--r--  fs/fat/dir.c | 1
-rw-r--r--  fs/fat/file.c | 2
-rw-r--r--  fs/fat/namei_msdos.c | 1
-rw-r--r--  fs/fat/namei_vfat.c | 1
-rw-r--r--  fs/fcntl.c | 1
-rw-r--r--  fs/freevxfs/vxfs_super.c | 1
-rw-r--r--  fs/fuse/dev.c | 91
-rw-r--r--  fs/fuse/dir.c | 57
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/fuse/fuse_i.h | 27
-rw-r--r--  fs/fuse/inode.c | 68
-rw-r--r--  fs/gfs2/aops.c | 39
-rw-r--r--  fs/gfs2/glock.c | 138
-rw-r--r--  fs/gfs2/glock.h | 3
-rw-r--r--  fs/gfs2/glops.c | 21
-rw-r--r--  fs/gfs2/incore.h | 2
-rw-r--r--  fs/gfs2/rgrp.c | 23
-rw-r--r--  fs/gfs2/super.c | 40
-rw-r--r--  fs/gfs2/super.h | 4
-rw-r--r--  fs/gfs2/trace_gfs2.h | 8
-rw-r--r--  fs/hfs/super.c | 1
-rw-r--r--  fs/hfsplus/super.c | 1
-rw-r--r--  fs/hostfs/hostfs_kern.c | 1
-rw-r--r--  fs/hpfs/dir.c | 1
-rw-r--r--  fs/hpfs/file.c | 1
-rw-r--r--  fs/hpfs/hpfs_fn.h | 1
-rw-r--r--  fs/hpfs/inode.c | 1
-rw-r--r--  fs/hpfs/namei.c | 1
-rw-r--r--  fs/inode.c | 40
-rw-r--r--  fs/isofs/inode.c | 4
-rw-r--r--  fs/jbd/journal.c | 26
-rw-r--r--  fs/jbd/transaction.c | 68
-rw-r--r--  fs/jbd2/journal.c | 31
-rw-r--r--  fs/jbd2/transaction.c | 68
-rw-r--r--  fs/jffs2/erase.c | 10
-rw-r--r--  fs/jffs2/file.c | 2
-rw-r--r--  fs/jffs2/scan.c | 4
-rw-r--r--  fs/jffs2/super.c | 1
-rw-r--r--  fs/jfs/acl.c | 4
-rw-r--r--  fs/lockd/clntproc.c | 1
-rw-r--r--  fs/lockd/host.c | 14
-rw-r--r--  fs/lockd/mon.c | 44
-rw-r--r--  fs/lockd/svc4proc.c | 1
-rw-r--r--  fs/lockd/svcproc.c | 1
-rw-r--r--  fs/namei.c | 7
-rw-r--r--  fs/namespace.c | 4
-rw-r--r--  fs/nfs/Makefile | 3
-rw-r--r--  fs/nfs/cache_lib.c | 140
-rw-r--r--  fs/nfs/cache_lib.h | 27
-rw-r--r--  fs/nfs/callback.c | 26
-rw-r--r--  fs/nfs/client.c | 33
-rw-r--r--  fs/nfs/delegation.c | 1
-rw-r--r--  fs/nfs/dir.c | 3
-rw-r--r--  fs/nfs/direct.c | 23
-rw-r--r--  fs/nfs/dns_resolve.c | 335
-rw-r--r--  fs/nfs/dns_resolve.h | 14
-rw-r--r--  fs/nfs/file.c | 50
-rw-r--r--  fs/nfs/getroot.c | 1
-rw-r--r--  fs/nfs/idmap.c | 6
-rw-r--r--  fs/nfs/inode.c | 101
-rw-r--r--  fs/nfs/internal.h | 29
-rw-r--r--  fs/nfs/mount_clnt.c | 83
-rw-r--r--  fs/nfs/nfs4_fs.h | 6
-rw-r--r--  fs/nfs/nfs4namespace.c | 24
-rw-r--r--  fs/nfs/nfs4proc.c | 81
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/nfs4xdr.c | 1460
-rw-r--r--  fs/nfs/read.c | 7
-rw-r--r--  fs/nfs/super.c | 247
-rw-r--r--  fs/nfs/write.c | 105
-rw-r--r--  fs/nfsd/export.c | 14
-rw-r--r--  fs/nfsd/nfs4idmap.c | 20
-rw-r--r--  fs/nfsd/nfsctl.c | 22
-rw-r--r--  fs/nfsd/nfssvc.c | 1
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/nilfs2/Kconfig | 25
-rw-r--r--  fs/nilfs2/bmap.c | 5
-rw-r--r--  fs/nilfs2/cpfile.c | 5
-rw-r--r--  fs/nilfs2/dat.c | 9
-rw-r--r--  fs/nilfs2/dir.c | 1
-rw-r--r--  fs/nilfs2/mdt.c | 4
-rw-r--r--  fs/nilfs2/segment.c | 44
-rw-r--r--  fs/notify/Kconfig | 12
-rw-r--r--  fs/notify/dnotify/Kconfig | 2
-rw-r--r--  fs/notify/fsnotify.c | 4
-rw-r--r--  fs/notify/inotify/Kconfig | 2
-rw-r--r--  fs/notify/inotify/inotify_user.c | 112
-rw-r--r--  fs/notify/notification.c | 19
-rw-r--r--  fs/ocfs2/alloc.c | 47
-rw-r--r--  fs/ocfs2/aops.c | 69
-rw-r--r--  fs/ocfs2/dcache.c | 35
-rw-r--r--  fs/ocfs2/dcache.h | 3
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 1
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 2
-rw-r--r--  fs/ocfs2/file.c | 5
-rw-r--r--  fs/ocfs2/ioctl.c | 1
-rw-r--r--  fs/ocfs2/journal.c | 8
-rw-r--r--  fs/ocfs2/journal.h | 19
-rw-r--r--  fs/ocfs2/ocfs2.h | 22
-rw-r--r--  fs/ocfs2/quota.h | 1
-rw-r--r--  fs/ocfs2/quota_global.c | 134
-rw-r--r--  fs/ocfs2/quota_local.c | 110
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 3
-rw-r--r--  fs/ocfs2/super.c | 30
-rw-r--r--  fs/ocfs2/xattr.c | 3
-rw-r--r--  fs/partitions/check.c | 2
-rw-r--r--  fs/pipe.c | 4
-rw-r--r--  fs/proc/base.c | 27
-rw-r--r--  fs/proc/task_mmu.c | 1
-rw-r--r--  fs/proc/task_nommu.c | 1
-rw-r--r--  fs/quota/dquot.c | 9
-rw-r--r--  fs/ramfs/file-nommu.c | 1
-rw-r--r--  fs/reiserfs/journal.c | 2
-rw-r--r--  fs/reiserfs/super.c | 1
-rw-r--r--  fs/reiserfs/xattr.c | 1
-rw-r--r--  fs/squashfs/super.c | 1
-rw-r--r--  fs/sync.c | 5
-rw-r--r--  fs/sysfs/bin.c | 1
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/ubifs/io.c | 57
-rw-r--r--  fs/ubifs/ioctl.c | 1
-rw-r--r--  fs/ubifs/recovery.c | 57
-rw-r--r--  fs/ubifs/replay.c | 9
-rw-r--r--  fs/ubifs/scan.c | 20
-rw-r--r--  fs/ubifs/super.c | 14
-rw-r--r--  fs/ubifs/ubifs.h | 11
-rw-r--r--  fs/udf/super.c | 12
-rw-r--r--  fs/xfs/linux-2.6/kmem.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 4
-rw-r--r--  fs/xfs/xfs_attr.c | 8
-rw-r--r--  fs/xfs/xfs_bmap.c | 2
-rw-r--r--  fs/xfs/xfs_btree.c | 4
-rw-r--r--  fs/xfs/xfs_da_btree.c | 6
-rw-r--r--  fs/xfs/xfs_dir2.c | 2
-rw-r--r--  fs/xfs/xfs_fsops.c | 20
-rw-r--r--  fs/xfs/xfs_iget.c | 142
-rw-r--r--  fs/xfs/xfs_inode.c | 10
-rw-r--r--  fs/xfs/xfs_inode.h | 17
-rw-r--r--  fs/xfs/xfs_log.c | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 4
232 files changed, 6761 insertions, 3895 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 6fcb1e7..9282828 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -57,7 +57,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
buffer = kmap(page);
offset = page_offset(page);
- retval = v9fs_file_readn(filp, buffer, NULL, offset, PAGE_CACHE_SIZE);
+ retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
if (retval < 0)
goto done;
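
The fix above transposes two arguments: the readpage path had been passing the page offset where the byte count belongs. A minimal sketch of the corrected call, assuming v9fs_file_readn() takes the count before the offset (the prototype itself is not quoted in this diff):

        /* Assumed prototype, not part of this hunk:
         *   int v9fs_file_readn(struct file *filp, char *data,
         *                       char __user *udata, u32 count, u64 offset);
         */
        offset = page_offset(page);
        retval = v9fs_file_readn(filp, buffer, NULL,
                                 PAGE_CACHE_SIZE,  /* count: bytes to read */
                                 offset);          /* offset: where to start */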
diff --git a/fs/Kconfig b/fs/Kconfig
index a97263b..0e7da7b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -186,32 +186,7 @@ source "fs/romfs/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
-
-config NILFS2_FS
- tristate "NILFS2 file system support (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- select CRC32
- help
- NILFS2 is a log-structured file system (LFS) supporting continuous
- snapshotting. In addition to versioning capability of the entire
- file system, users can even restore files mistakenly overwritten or
- destroyed just a few seconds ago. Since this file system can keep
- consistency like conventional LFS, it achieves quick recovery after
- system crashes.
-
- NILFS2 creates a number of checkpoints every few seconds or per
- synchronous write basis (unless there is no change). Users can
- select significant versions among continuously created checkpoints,
- and can change them into snapshots which will be preserved for long
- periods until they are changed back to checkpoints. Each
- snapshot is mountable as a read-only file system concurrently with
- its writable mount, and this feature is convenient for online backup.
-
- Some features including atime, extended attributes, and POSIX ACLs,
- are not supported yet.
-
- To compile this file system support as a module, choose M here: the
- module will be called nilfs2. If unsure, say N.
+source "fs/nilfs2/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index aad92f0..6910a98 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -13,6 +13,7 @@
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/statfs.h>
#include "adfs.h"
#include "dir_f.h"
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 9bd7577..88067f3 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -564,7 +564,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct afs_vnode *vnode, *dir;
- struct afs_fid fid;
+ struct afs_fid uninitialized_var(fid);
struct dentry *parent;
struct key *key;
void *dir_version;
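
uninitialized_var() above only silences a false "may be used uninitialized" warning; fid is in fact assigned on every path that reads it. A sketch of the GCC-era definition this relies on (an assumption from contemporary kernels, not part of this diff):

        /* From linux/compiler-gcc*.h (assumed): */
        #define uninitialized_var(x) x = x

        struct afs_fid uninitialized_var(fid); /* expands to: struct afs_fid fid = fid; */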
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 210acaf..3ff8bdd 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -432,7 +432,6 @@ vfs_rejected_lock:
list_del_init(&fl->fl_u.afs.link);
if (list_empty(&vnode->granted_locks))
afs_defer_unlock(vnode, key);
- spin_unlock(&vnode->lock);
goto abort_attempt;
}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index c52be53..5ffb570 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -17,7 +17,6 @@
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/mnt_namespace.h>
#include "internal.h"
diff --git a/fs/afs/super.c b/fs/afs/super.c
index ad0514d..e1ea1c2 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
diff --git a/fs/aio.c b/fs/aio.c
index 76da125..d065b2c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
assert_spin_locked(&ctx->ctx_lock);
+ if (req->ki_eventfd != NULL)
+ eventfd_ctx_put(req->ki_eventfd);
if (req->ki_dtor)
req->ki_dtor(req);
if (req->ki_iovec != &req->ki_inline_vec)
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data)
/* Complete the fput(s) */
if (req->ki_filp != NULL)
__fput(req->ki_filp);
- if (req->ki_eventfd != NULL)
- __fput(req->ki_eventfd);
/* Link the iocb into the context's free list */
spin_lock_irq(&ctx->ctx_lock);
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data)
*/
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
- int schedule_putreq = 0;
-
dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
req, atomic_long_read(&req->ki_filp->f_count));
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
* we would not be holding the last reference to the file*, so
* this function will be executed w/out any aio kthread wakeup.
*/
- if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
- schedule_putreq++;
- else
- req->ki_filp = NULL;
- if (req->ki_eventfd != NULL) {
- if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
- schedule_putreq++;
- else
- req->ki_eventfd = NULL;
- }
- if (unlikely(schedule_putreq)) {
+ if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
queue_work(aio_wq, &fput_work);
- } else
+ } else {
+ req->ki_filp = NULL;
really_put_req(ctx, req);
+ }
return 1;
}
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
* an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function.
*/
- req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+ req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL;
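
The aio changes above replace file-based eventfd references with context references: eventfd_ctx_fdget() pins the eventfd context itself, so the completion path no longer has to juggle f_count and route the eventfd through the fput workqueue. A minimal sketch of the new reference pattern, not taken verbatim from this diff:

        #include <linux/eventfd.h>

        struct eventfd_ctx *ctx;

        ctx = eventfd_ctx_fdget(resfd);  /* reference the context, not the file */
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        eventfd_signal(ctx, 1);          /* signal one completion event */
        eventfd_ctx_put(ctx);            /* drop the context reference */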
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index f3da2eb..00bf8fc 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -19,7 +19,6 @@
#include <linux/sched.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
-#include <linux/smp_lock.h>
#include <linux/magic.h>
#include <linux/dcache.h>
#include <linux/uaccess.h>
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 54bd07d..1e41aad 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -8,7 +8,6 @@
#include <linux/time.h>
#include <linux/string.h>
#include <linux/fs.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include "bfs.h"
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index 6a02126..88b9a3f 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -11,7 +11,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
#include "bfs.h"
#undef DEBUG
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 9fa212b0..b7c1603 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1522,11 +1522,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
info->thread = NULL;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
-
if (psinfo == NULL)
return 0;
+ fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+
/*
* Figure out how many notes we're going to need for each thread.
*/
@@ -1929,7 +1929,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
if (!elf)
goto out;
-
+ /*
+ * The number of segs is recorded in the ELF header as a 16-bit value.
+ * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this.
+ */
segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
segs += ELF_CORE_EXTRA_PHDRS;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 697f6b5..e92f229 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -828,15 +828,22 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
if (IS_ERR(bprm.file))
return res;
+ bprm.cred = prepare_exec_creds();
+ res = -ENOMEM;
+ if (!bprm.cred)
+ goto out;
+
res = prepare_binprm(&bprm);
if (res <= (unsigned long)-4096)
res = load_flat_file(&bprm, libs, id, NULL);
- if (bprm.file) {
- allow_write_access(bprm.file);
- fput(bprm.file);
- bprm.file = NULL;
- }
+
+ abort_creds(bprm.cred);
+
+out:
+ allow_write_access(bprm.file);
+ fput(bprm.file);
+
return(res);
}
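
The point of the hunk above is that prepare_binprm() dereferences bprm.cred, so load_flat_shared_library() must allocate credentials first and discard them once the library is loaded. A hedged sketch of the allocate/use/discard pattern:

        #include <linux/cred.h>

        struct cred *cred;

        cred = prepare_exec_creds();  /* may fail under memory pressure */
        if (!cred)
                return -ENOMEM;

        /* ... bprm.cred = cred; run prepare_binprm()/load_flat_file() ... */

        abort_creds(cred);            /* creds were only needed transiently */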
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 31c46a2..49a34e7 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -1,7 +1,7 @@
/*
* bio-integrity.c - bio data integrity extensions
*
- * Copyright (C) 2007, 2008 Oracle Corporation
+ * Copyright (C) 2007, 2008, 2009 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*
* This program is free software; you can redistribute it and/or
@@ -25,63 +25,121 @@
#include <linux/bio.h>
#include <linux/workqueue.h>
-static struct kmem_cache *bio_integrity_slab __read_mostly;
-static mempool_t *bio_integrity_pool;
-static struct bio_set *integrity_bio_set;
+struct integrity_slab {
+ struct kmem_cache *slab;
+ unsigned short nr_vecs;
+ char name[8];
+};
+
+#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
+struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
+ IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
+};
+#undef IS
+
static struct workqueue_struct *kintegrityd_wq;
+static inline unsigned int vecs_to_idx(unsigned int nr)
+{
+ switch (nr) {
+ case 1:
+ return 0;
+ case 2 ... 4:
+ return 1;
+ case 5 ... 16:
+ return 2;
+ case 17 ... 64:
+ return 3;
+ case 65 ... 128:
+ return 4;
+ case 129 ... BIO_MAX_PAGES:
+ return 5;
+ default:
+ BUG();
+ }
+}
+
+static inline int use_bip_pool(unsigned int idx)
+{
+ if (idx == BIOVEC_NR_POOLS)
+ return 1;
+
+ return 0;
+}
+
/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
+ * @bs: bio_set to allocate from
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs)
+struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs,
+ struct bio_set *bs)
{
struct bio_integrity_payload *bip;
- struct bio_vec *iv;
- unsigned long idx;
+ unsigned int idx = vecs_to_idx(nr_vecs);
BUG_ON(bio == NULL);
+ bip = NULL;
- bip = mempool_alloc(bio_integrity_pool, gfp_mask);
- if (unlikely(bip == NULL)) {
- printk(KERN_ERR "%s: could not alloc bip\n", __func__);
- return NULL;
- }
+ /* Lower order allocations come straight from slab */
+ if (!use_bip_pool(idx))
+ bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);
- memset(bip, 0, sizeof(*bip));
+ /* Use mempool if lower order alloc failed or max vecs were requested */
+ if (bip == NULL) {
+ bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
- iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set);
- if (unlikely(iv == NULL)) {
- printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__);
- mempool_free(bip, bio_integrity_pool);
- return NULL;
+ if (unlikely(bip == NULL)) {
+ printk(KERN_ERR "%s: could not alloc bip\n", __func__);
+ return NULL;
+ }
}
- bip->bip_pool = idx;
- bip->bip_vec = iv;
+ memset(bip, 0, sizeof(*bip));
+
+ bip->bip_slab = idx;
bip->bip_bio = bio;
bio->bi_integrity = bip;
return bip;
}
+EXPORT_SYMBOL(bio_integrity_alloc_bioset);
+
+/**
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * @bio: bio to attach integrity metadata to
+ * @gfp_mask: Memory allocation mask
+ * @nr_vecs: Number of integrity metadata scatter-gather elements
+ *
+ * Description: This function prepares a bio for attaching integrity
+ * metadata. nr_vecs specifies the maximum number of pages containing
+ * integrity metadata that can be attached.
+ */
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs)
+{
+ return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
+}
EXPORT_SYMBOL(bio_integrity_alloc);
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
+ * @bs: bio_set this bio was allocated from
*
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
-void bio_integrity_free(struct bio *bio)
+void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
@@ -92,8 +150,10 @@ void bio_integrity_free(struct bio *bio)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);
- bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool);
- mempool_free(bip, bio_integrity_pool);
+ if (use_bip_pool(bip->bip_slab))
+ mempool_free(bip, bs->bio_integrity_pool);
+ else
+ kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);
bio->bi_integrity = NULL;
}
@@ -114,7 +174,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct bio_integrity_payload *bip = bio->bi_integrity;
struct bio_vec *iv;
- if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_pool)) {
+ if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
printk(KERN_ERR "%s: bip_vec full\n", __func__);
return 0;
}
@@ -647,8 +707,8 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
bp->iv1 = bip->bip_vec[0];
bp->iv2 = bip->bip_vec[0];
- bp->bip1.bip_vec = &bp->iv1;
- bp->bip2.bip_vec = &bp->iv2;
+ bp->bip1.bip_vec[0] = bp->iv1;
+ bp->bip2.bip_vec[0] = bp->iv2;
bp->iv1.bv_len = sectors * bi->tuple_size;
bp->iv2.bv_offset += sectors * bi->tuple_size;
@@ -667,17 +727,19 @@ EXPORT_SYMBOL(bio_integrity_split);
* @bio: New bio
* @bio_src: Original bio
* @gfp_mask: Memory allocation mask
+ * @bs: bio_set to allocate bip from
*
* Description: Called to allocate a bip when cloning a bio
*/
-int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask, struct bio_set *bs)
{
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
BUG_ON(bip_src == NULL);
- bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+ bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
if (bip == NULL)
return -EIO;
@@ -693,25 +755,43 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
}
EXPORT_SYMBOL(bio_integrity_clone);
-static int __init bio_integrity_init(void)
+int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
- kintegrityd_wq = create_workqueue("kintegrityd");
+ unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
+
+ bs->bio_integrity_pool =
+ mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
+ if (!bs->bio_integrity_pool)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL(bioset_integrity_create);
+
+void bioset_integrity_free(struct bio_set *bs)
+{
+ if (bs->bio_integrity_pool)
+ mempool_destroy(bs->bio_integrity_pool);
+}
+EXPORT_SYMBOL(bioset_integrity_free);
+
+void __init bio_integrity_init(void)
+{
+ unsigned int i;
+
+ kintegrityd_wq = create_workqueue("kintegrityd");
if (!kintegrityd_wq)
panic("Failed to create kintegrityd\n");
- bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
+ unsigned int size;
- bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE,
- bio_integrity_slab);
- if (!bio_integrity_pool)
- panic("bio_integrity: can't allocate bip pool\n");
+ size = sizeof(struct bio_integrity_payload)
+ + bip_slab[i].nr_vecs * sizeof(struct bio_vec);
- integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0);
- if (!integrity_bio_set)
- panic("bio_integrity: can't allocate bio_set\n");
-
- return 0;
+ bip_slab[i].slab =
+ kmem_cache_create(bip_slab[i].name, size, 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ }
}
-subsys_initcall(bio_integrity_init);
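
The rewrite above replaces the single bip mempool with a family of slabs whose objects embed an inline bio_vec array, keeping a per-bioset mempool only as the fallback for the largest bucket. An illustrative summary of the vecs_to_idx() mapping and the sizing arithmetic, with bucket sizes read off the IS() table above:

        /*
         * nr_vecs 1       -> idx 0, slab "bip-1"   (1 inline vec)
         * nr_vecs 2..4    -> idx 1, slab "bip-4"   (4 inline vecs)
         * nr_vecs 5..16   -> idx 2, slab "bip-16"  (16 inline vecs)
         * nr_vecs 17..64  -> idx 3, slab "bip-64"
         * nr_vecs 65..128 -> idx 4, slab "bip-128"
         * larger          -> idx 5, sized for BIO_MAX_PAGES
         *
         * Each slab object holds the payload plus its inline vec array:
         */
        size = sizeof(struct bio_integrity_payload)
               + bip_slab[idx].nr_vecs * sizeof(struct bio_vec);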
diff --git a/fs/bio.c b/fs/bio.c
index 24c9140..7673800 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -238,7 +238,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
if (bio_integrity(bio))
- bio_integrity_free(bio);
+ bio_integrity_free(bio, bs);
/*
* If we have front padding, adjust the bio pointer before freeing
@@ -341,7 +341,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
static void bio_kmalloc_destructor(struct bio *bio)
{
if (bio_integrity(bio))
- bio_integrity_free(bio);
+ bio_integrity_free(bio, fs_bio_set);
kfree(bio);
}
@@ -472,7 +472,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
if (bio_integrity(bio)) {
int ret;
- ret = bio_integrity_clone(b, bio, gfp_mask);
+ ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
if (ret < 0) {
bio_put(b);
@@ -705,14 +705,13 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
}
static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
- struct sg_iovec *iov, int iov_count, int uncopy,
- int do_free_page)
+ struct sg_iovec *iov, int iov_count,
+ int to_user, int from_user, int do_free_page)
{
int ret = 0, i;
struct bio_vec *bvec;
int iov_idx = 0;
unsigned int iov_off = 0;
- int read = bio_data_dir(bio) == READ;
__bio_for_each_segment(bvec, bio, i, 0) {
char *bv_addr = page_address(bvec->bv_page);
@@ -727,13 +726,14 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
iov_addr = iov[iov_idx].iov_base + iov_off;
if (!ret) {
- if (!read && !uncopy)
- ret = copy_from_user(bv_addr, iov_addr,
- bytes);
- if (read && uncopy)
+ if (to_user)
ret = copy_to_user(iov_addr, bv_addr,
bytes);
+ if (from_user)
+ ret = copy_from_user(bv_addr, iov_addr,
+ bytes);
+
if (ret)
ret = -EFAULT;
}
@@ -770,7 +770,8 @@ int bio_uncopy_user(struct bio *bio)
if (!bio_flagged(bio, BIO_NULL_MAPPED))
ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
- bmd->nr_sgvecs, 1, bmd->is_our_pages);
+ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ 0, bmd->is_our_pages);
bio_free_map_data(bmd);
bio_put(bio);
return ret;
@@ -875,8 +876,9 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
/*
* success
*/
- if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
- ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
+ if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
+ (map_data && map_data->from_user)) {
+ ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
if (ret)
goto cleanup;
}
@@ -1539,6 +1541,7 @@ void bioset_free(struct bio_set *bs)
if (bs->bio_pool)
mempool_destroy(bs->bio_pool);
+ bioset_integrity_free(bs);
biovec_free_pools(bs);
bio_put_slab(bs);
@@ -1579,6 +1582,9 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool)
goto bad;
+ if (bioset_integrity_create(bs, pool_size))
+ goto bad;
+
if (!biovec_create_pools(bs, pool_size))
return bs;
@@ -1616,6 +1622,7 @@ static int __init init_bio(void)
if (!bio_slabs)
panic("bio: can't allocate bios\n");
+ bio_integrity_init();
biovec_init_slabs();
fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3a6d4fb..94dfda2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -564,6 +564,16 @@ struct block_device *bdget(dev_t dev)
EXPORT_SYMBOL(bdget);
+/**
+ * bdgrab -- Grab a reference to an already referenced block device
+ * @bdev: Block device to grab a reference to.
+ */
+struct block_device *bdgrab(struct block_device *bdev)
+{
+ atomic_inc(&bdev->bd_inode->i_count);
+ return bdev;
+}
+
long nr_blockdev_pages(void)
{
struct block_device *bdev;
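
bdgrab() above takes an extra reference on a block device the caller already holds; bumping the backing inode's i_count is cheap and avoids bdget()'s hash lookup. A hedged usage sketch (bdput() as the matching release is an assumption based on the existing bdev API, not shown in this diff):

        struct block_device *bd = bdgrab(bdev); /* second ref, e.g. for async use */
        /* ... hand bd to another context ... */
        bdput(bd);                              /* drop it when done */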
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7f88628..019e8af 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -299,8 +299,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
"btrfs-%s-%d", workers->name,
workers->num_workers + i);
if (IS_ERR(worker->task)) {
- kfree(worker);
ret = PTR_ERR(worker->task);
+ kfree(worker);
goto fail;
}
@@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work)
* list
*/
if (worker->idle) {
- spin_lock_irqsave(&worker->workers->lock, flags);
+ spin_lock(&worker->workers->lock);
worker->idle = 0;
list_move_tail(&worker->worker_list,
&worker->workers->worker_list);
- spin_unlock_irqrestore(&worker->workers->lock, flags);
+ spin_unlock(&worker->workers->lock);
}
if (!worker->working) {
wake = 1;
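
Two independent fixes above: PTR_ERR(worker->task) must be read before kfree(worker) frees the structure it lives in, and the requeue path drops the _irqsave variant, presumably because workers->lock is never taken from interrupt context. A generic sketch of that locking distinction:

        spin_lock(&lock);                       /* lock only taken in process context */
        spin_unlock(&lock);

        spin_lock_irqsave(&lock, flags);        /* lock also taken from IRQ context */
        spin_unlock_irqrestore(&lock, flags);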
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index de1e2fd..9d8ba4d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -26,7 +26,6 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 60a45f3..3fdcc05 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -557,19 +557,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
btrfs_disk_key_to_cpu(&k1, disk);
- if (k1.objectid > k2->objectid)
- return 1;
- if (k1.objectid < k2->objectid)
- return -1;
- if (k1.type > k2->type)
- return 1;
- if (k1.type < k2->type)
- return -1;
- if (k1.offset > k2->offset)
- return 1;
- if (k1.offset < k2->offset)
- return -1;
- return 0;
+ return btrfs_comp_cpu_keys(&k1, k2);
}
/*
@@ -1052,9 +1040,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
return 0;
- if (btrfs_header_nritems(mid) > 2)
- return 0;
-
if (btrfs_header_nritems(mid) < 2)
err_on_enospc = 1;
@@ -1701,6 +1686,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
struct extent_buffer *b;
int slot;
int ret;
+ int err;
int level;
int lowest_unlock = 1;
u8 lowest_level = 0;
@@ -1737,8 +1723,6 @@ again:
p->locks[level] = 1;
if (cow) {
- int wret;
-
/*
* if we don't really need to cow this block
* then we don't want to set the path blocking,
@@ -1749,12 +1733,12 @@ again:
btrfs_set_path_blocking(p);
- wret = btrfs_cow_block(trans, root, b,
- p->nodes[level + 1],
- p->slots[level + 1], &b);
- if (wret) {
+ err = btrfs_cow_block(trans, root, b,
+ p->nodes[level + 1],
+ p->slots[level + 1], &b);
+ if (err) {
free_extent_buffer(b);
- ret = wret;
+ ret = err;
goto done;
}
}
@@ -1793,41 +1777,45 @@ cow_done:
ret = bin_search(b, key, level, &slot);
if (level != 0) {
- if (ret && slot > 0)
+ int dec = 0;
+ if (ret && slot > 0) {
+ dec = 1;
slot -= 1;
+ }
p->slots[level] = slot;
- ret = setup_nodes_for_search(trans, root, p, b, level,
+ err = setup_nodes_for_search(trans, root, p, b, level,
ins_len);
- if (ret == -EAGAIN)
+ if (err == -EAGAIN)
goto again;
- else if (ret)
+ if (err) {
+ ret = err;
goto done;
+ }
b = p->nodes[level];
slot = p->slots[level];
unlock_up(p, level, lowest_unlock);
- /* this is only true while dropping a snapshot */
if (level == lowest_level) {
- ret = 0;
+ if (dec)
+ p->slots[level]++;
goto done;
}
- ret = read_block_for_search(trans, root, p,
+ err = read_block_for_search(trans, root, p,
&b, level, slot, key);
- if (ret == -EAGAIN)
+ if (err == -EAGAIN)
goto again;
-
- if (ret == -EIO)
+ if (err) {
+ ret = err;
goto done;
+ }
if (!p->skip_locking) {
- int lret;
-
btrfs_clear_path_blocking(p, NULL);
- lret = btrfs_try_spin_lock(b);
+ err = btrfs_try_spin_lock(b);
- if (!lret) {
+ if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_lock(b);
btrfs_clear_path_blocking(p, b);
@@ -1837,16 +1825,14 @@ cow_done:
p->slots[level] = slot;
if (ins_len > 0 &&
btrfs_leaf_free_space(root, b) < ins_len) {
- int sret;
-
btrfs_set_path_blocking(p);
- sret = split_leaf(trans, root, key,
- p, ins_len, ret == 0);
+ err = split_leaf(trans, root, key,
+ p, ins_len, ret == 0);
btrfs_clear_path_blocking(p, NULL);
- BUG_ON(sret > 0);
- if (sret) {
- ret = sret;
+ BUG_ON(err > 0);
+ if (err) {
+ ret = err;
goto done;
}
}
@@ -3807,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
}
/* delete the leaf if it is mostly empty */
- if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) {
+ if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
/* push_leaf_left fixes the path.
* make sure the path still points to our leaf
* for possible call to del_ptr below
@@ -4042,10 +4028,9 @@ out:
* calling this function.
*/
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *key, int lowest_level,
+ struct btrfs_key *key, int level,
int cache_only, u64 min_trans)
{
- int level = lowest_level;
int slot;
struct extent_buffer *c;
@@ -4058,11 +4043,40 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
c = path->nodes[level];
next:
if (slot >= btrfs_header_nritems(c)) {
- level++;
- if (level == BTRFS_MAX_LEVEL)
+ int ret;
+ int orig_lowest;
+ struct btrfs_key cur_key;
+ if (level + 1 >= BTRFS_MAX_LEVEL ||
+ !path->nodes[level + 1])
return 1;
- continue;
+
+ if (path->locks[level + 1]) {
+ level++;
+ continue;
+ }
+
+ slot = btrfs_header_nritems(c) - 1;
+ if (level == 0)
+ btrfs_item_key_to_cpu(c, &cur_key, slot);
+ else
+ btrfs_node_key_to_cpu(c, &cur_key, slot);
+
+ orig_lowest = path->lowest_level;
+ btrfs_release_path(root, path);
+ path->lowest_level = level;
+ ret = btrfs_search_slot(NULL, root, &cur_key, path,
+ 0, 0);
+ path->lowest_level = orig_lowest;
+ if (ret < 0)
+ return ret;
+
+ c = path->nodes[level];
+ slot = path->slots[level];
+ if (ret == 0)
+ slot++;
+ goto next;
}
+
if (level == 0)
btrfs_item_key_to_cpu(c, key, slot);
else {
@@ -4146,7 +4160,8 @@ again:
* advance the path if there are now more items available.
*/
if (nritems > 0 && path->slots[0] < nritems - 1) {
- path->slots[0]++;
+ if (ret == 0)
+ path->slots[0]++;
ret = 0;
goto done;
}
@@ -4278,10 +4293,10 @@ int btrfs_previous_item(struct btrfs_root *root,
path->slots[0]--;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.type == type)
- return 0;
if (found_key.objectid < min_objectid)
break;
+ if (found_key.type == type)
+ return 0;
if (found_key.objectid == min_objectid &&
found_key.type < type)
break;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2779c2f..837435c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -481,7 +481,7 @@ struct btrfs_shared_data_ref {
struct btrfs_extent_inline_ref {
u8 type;
- u64 offset;
+ __le64 offset;
} __attribute__ ((__packed__));
/* old style backrefs item */
@@ -689,6 +689,7 @@ struct btrfs_space_info {
struct list_head block_groups;
spinlock_t lock;
struct rw_semaphore groups_sem;
+ atomic_t caching_threads;
};
/*
@@ -707,6 +708,9 @@ struct btrfs_free_cluster {
/* first extent starting offset */
u64 window_start;
+ /* if this cluster simply points at a bitmap in the block group */
+ bool points_to_bitmap;
+
struct btrfs_block_group_cache *block_group;
/*
* when a cluster is allocated from a block group, we put the
@@ -716,24 +720,37 @@ struct btrfs_free_cluster {
struct list_head block_group_list;
};
+enum btrfs_caching_type {
+ BTRFS_CACHE_NO = 0,
+ BTRFS_CACHE_STARTED = 1,
+ BTRFS_CACHE_FINISHED = 2,
+};
+
struct btrfs_block_group_cache {
struct btrfs_key key;
struct btrfs_block_group_item item;
+ struct btrfs_fs_info *fs_info;
spinlock_t lock;
- struct mutex cache_mutex;
u64 pinned;
u64 reserved;
u64 flags;
- int cached;
+ u64 sectorsize;
+ int extents_thresh;
+ int free_extents;
+ int total_bitmaps;
int ro;
int dirty;
+ /* cache tracking stuff */
+ wait_queue_head_t caching_q;
+ int cached;
+
struct btrfs_space_info *space_info;
/* free space cache stuff */
spinlock_t tree_lock;
- struct rb_root free_space_bytes;
struct rb_root free_space_offset;
+ u64 free_space;
/* block group cache stuff */
struct rb_node cache_node;
@@ -808,6 +825,7 @@ struct btrfs_fs_info {
struct mutex drop_mutex;
struct mutex volume_mutex;
struct mutex tree_reloc_mutex;
+ struct rw_semaphore extent_commit_sem;
/*
* this protects the ordered operations list only while we are
@@ -1988,6 +2006,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
u64 bytes);
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
u64 bytes);
+void btrfs_free_pinned_extents(struct btrfs_fs_info *info);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -2074,8 +2093,7 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
- *root);
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d28d29c..e83be2e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1639,6 +1639,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->volume_mutex);
mutex_init(&fs_info->tree_reloc_mutex);
+ init_rwsem(&fs_info->extent_commit_sem);
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -1799,6 +1800,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_super_chunk_root(disk_super),
blocksize, generation);
BUG_ON(!chunk_root->node);
+ if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
+ printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
+ sb->s_id);
+ goto fail_chunk_root;
+ }
btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
chunk_root->commit_root = btrfs_root_node(chunk_root);
@@ -1826,6 +1832,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
blocksize, generation);
if (!tree_root->node)
goto fail_chunk_root;
+ if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
+ printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
+ sb->s_id);
+ goto fail_tree_root;
+ }
btrfs_set_root_node(&tree_root->root_item, tree_root->node);
tree_root->commit_root = btrfs_root_node(tree_root);
@@ -2322,6 +2333,9 @@ int close_ctree(struct btrfs_root *root)
printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
}
+ fs_info->closing = 2;
+ smp_mb();
+
if (fs_info->delalloc_bytes) {
printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
(unsigned long long)fs_info->delalloc_bytes);
@@ -2343,6 +2357,7 @@ int close_ctree(struct btrfs_root *root)
free_extent_buffer(root->fs_info->csum_root->commit_root);
btrfs_free_block_groups(root->fs_info);
+ btrfs_free_pinned_extents(root->fs_info);
del_fs_roots(fs_info);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index edc7d20..72a2b9c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -21,6 +21,7 @@
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
+#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
@@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 alloc_bytes,
u64 flags, int force);
+static noinline int
+block_group_cache_done(struct btrfs_block_group_cache *cache)
+{
+ smp_mb();
+ return cache->cached == BTRFS_CACHE_FINISHED;
+}
+
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
return (cache->flags & bits) == bits;
@@ -146,20 +154,70 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
}
/*
+ * We always set EXTENT_LOCKED for the super mirror extents so we don't
+ * overwrite them, so those bits need to be unset. Also, if we are unmounting
+ * with pinned extents still sitting there because block group caching was in
+ * progress, we need to clear those now, since we are done.
+ */
+void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
+{
+ u64 start, end, last = 0;
+ int ret;
+
+ while (1) {
+ ret = find_first_extent_bit(&info->pinned_extents, last,
+ &start, &end,
+ EXTENT_LOCKED|EXTENT_DIRTY);
+ if (ret)
+ break;
+
+ clear_extent_bits(&info->pinned_extents, start, end,
+ EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
+ last = end+1;
+ }
+}
+
+static int remove_sb_from_cache(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 bytenr;
+ u64 *logical;
+ int stripe_len;
+ int i, nr, ret;
+
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+ cache->key.objectid, bytenr,
+ 0, &logical, &nr, &stripe_len);
+ BUG_ON(ret);
+ while (nr--) {
+ try_lock_extent(&fs_info->pinned_extents,
+ logical[nr],
+ logical[nr] + stripe_len - 1, GFP_NOFS);
+ }
+ kfree(logical);
+ }
+
+ return 0;
+}
+
+/*
* this is only called by cache_block_group, since we could have freed extents
* we need to check the pinned_extents for any extents that can't be used yet
* since their free space will be released as soon as the transaction commits.
*/
-static int add_new_free_space(struct btrfs_block_group_cache *block_group,
+static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_fs_info *info, u64 start, u64 end)
{
- u64 extent_start, extent_end, size;
+ u64 extent_start, extent_end, size, total_added = 0;
int ret;
while (start < end) {
ret = find_first_extent_bit(&info->pinned_extents, start,
&extent_start, &extent_end,
- EXTENT_DIRTY);
+ EXTENT_DIRTY|EXTENT_LOCKED);
if (ret)
break;
@@ -167,6 +225,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
+ total_added += size;
ret = btrfs_add_free_space(block_group, start,
size);
BUG_ON(ret);
@@ -178,84 +237,93 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
if (start < end) {
size = end - start;
+ total_added += size;
ret = btrfs_add_free_space(block_group, start, size);
BUG_ON(ret);
}
- return 0;
+ return total_added;
}
-static int remove_sb_from_cache(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache)
-{
- u64 bytenr;
- u64 *logical;
- int stripe_len;
- int i, nr, ret;
-
- for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
- cache->key.objectid, bytenr, 0,
- &logical, &nr, &stripe_len);
- BUG_ON(ret);
- while (nr--) {
- btrfs_remove_free_space(cache, logical[nr],
- stripe_len);
- }
- kfree(logical);
- }
- return 0;
-}
-
-static int cache_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache *block_group)
+static int caching_kthread(void *data)
{
+ struct btrfs_block_group_cache *block_group = data;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ u64 last = 0;
struct btrfs_path *path;
int ret = 0;
struct btrfs_key key;
struct extent_buffer *leaf;
int slot;
- u64 last;
+ u64 total_found = 0;
- if (!block_group)
- return 0;
-
- root = root->fs_info->extent_root;
-
- if (block_group->cached)
- return 0;
+ BUG_ON(!fs_info);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->reada = 2;
+ atomic_inc(&block_group->space_info->caching_threads);
+ last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
/*
- * we get into deadlocks with paths held by callers of this function.
- * since the alloc_mutex is protecting things right now, just
- * skip the locking here
+ * We don't want to deadlock with somebody trying to allocate a new
+ * extent for the extent root while also trying to search the extent
+ * root to add free space. So we skip locking and search the commit
+ * root, since it's read-only
*/
path->skip_locking = 1;
- last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+ path->search_commit_root = 1;
+ path->reada = 2;
+
key.objectid = last;
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+again:
+ /* need to make sure the commit_root doesn't disappear */
+ down_read(&fs_info->extent_commit_sem);
+
+ ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
if (ret < 0)
goto err;
while (1) {
+ smp_mb();
+ if (block_group->fs_info->closing > 1) {
+ last = (u64)-1;
+ break;
+ }
+
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
+ ret = btrfs_next_leaf(fs_info->extent_root, path);
if (ret < 0)
goto err;
- if (ret == 0)
- continue;
- else
+ else if (ret)
break;
+
+ if (need_resched() ||
+ btrfs_transaction_in_commit(fs_info)) {
+ leaf = path->nodes[0];
+
+ /* this shouldn't happen, but if the
+ * leaf is empty just move on.
+ */
+ if (btrfs_header_nritems(leaf) == 0)
+ break;
+ /*
+ * we need to copy the key out so that
+ * we are sure the next search advances
+ * us forward in the btree.
+ */
+ btrfs_item_key_to_cpu(leaf, &key, 0);
+ btrfs_release_path(fs_info->extent_root, path);
+ up_read(&fs_info->extent_commit_sem);
+ schedule_timeout(1);
+ goto again;
+ }
+
+ continue;
}
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid < block_group->key.objectid)
@@ -266,24 +334,59 @@ static int cache_block_group(struct btrfs_root *root,
break;
if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
- add_new_free_space(block_group, root->fs_info, last,
- key.objectid);
-
+ total_found += add_new_free_space(block_group,
+ fs_info, last,
+ key.objectid);
last = key.objectid + key.offset;
}
+
+ if (total_found > (1024 * 1024 * 2)) {
+ total_found = 0;
+ wake_up(&block_group->caching_q);
+ }
next:
path->slots[0]++;
}
+ ret = 0;
- add_new_free_space(block_group, root->fs_info, last,
- block_group->key.objectid +
- block_group->key.offset);
+ total_found += add_new_free_space(block_group, fs_info, last,
+ block_group->key.objectid +
+ block_group->key.offset);
+
+ spin_lock(&block_group->lock);
+ block_group->cached = BTRFS_CACHE_FINISHED;
+ spin_unlock(&block_group->lock);
- block_group->cached = 1;
- remove_sb_from_cache(root, block_group);
- ret = 0;
err:
btrfs_free_path(path);
+ up_read(&fs_info->extent_commit_sem);
+ atomic_dec(&block_group->space_info->caching_threads);
+ wake_up(&block_group->caching_q);
+
+ return 0;
+}
+
+static int cache_block_group(struct btrfs_block_group_cache *cache)
+{
+ struct task_struct *tsk;
+ int ret = 0;
+
+ spin_lock(&cache->lock);
+ if (cache->cached != BTRFS_CACHE_NO) {
+ spin_unlock(&cache->lock);
+ return ret;
+ }
+ cache->cached = BTRFS_CACHE_STARTED;
+ spin_unlock(&cache->lock);
+
+ tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
+ cache->key.objectid);
+ if (IS_ERR(tsk)) {
+ ret = PTR_ERR(tsk);
+ printk(KERN_ERR "error running thread %d\n", ret);
+ BUG();
+ }
+
return ret;
}
@@ -990,15 +1093,13 @@ static inline int extent_ref_type(u64 parent, u64 owner)
return type;
}
-static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
+static int find_next_key(struct btrfs_path *path, int level,
+ struct btrfs_key *key)
{
- int level;
- BUG_ON(!path->keep_locks);
- for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+ for (; level < BTRFS_MAX_LEVEL; level++) {
if (!path->nodes[level])
break;
- btrfs_assert_tree_locked(path->nodes[level]);
if (path->slots[level] + 1 >=
btrfs_header_nritems(path->nodes[level]))
continue;
@@ -1158,7 +1259,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
* For simplicity, we just do not add new inline back
* ref if there is any kind of item for this block
*/
- if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
+ if (find_next_key(path, 0, &key) == 0 &&
+ key.objectid == bytenr &&
key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
err = -EAGAIN;
goto out;
@@ -2388,13 +2490,29 @@ fail:
}
+static struct btrfs_block_group_cache *
+next_block_group(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
+{
+ struct rb_node *node;
+ spin_lock(&root->fs_info->block_group_cache_lock);
+ node = rb_next(&cache->cache_node);
+ btrfs_put_block_group(cache);
+ if (node) {
+ cache = rb_entry(node, struct btrfs_block_group_cache,
+ cache_node);
+ atomic_inc(&cache->count);
+ } else
+ cache = NULL;
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+ return cache;
+}
+
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- struct btrfs_block_group_cache *cache, *entry;
- struct rb_node *n;
+ struct btrfs_block_group_cache *cache;
int err = 0;
- int werr = 0;
struct btrfs_path *path;
u64 last = 0;
@@ -2403,39 +2521,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
return -ENOMEM;
while (1) {
- cache = NULL;
- spin_lock(&root->fs_info->block_group_cache_lock);
- for (n = rb_first(&root->fs_info->block_group_cache_tree);
- n; n = rb_next(n)) {
- entry = rb_entry(n, struct btrfs_block_group_cache,
- cache_node);
- if (entry->dirty) {
- cache = entry;
- break;
- }
+ if (last == 0) {
+ err = btrfs_run_delayed_refs(trans, root,
+ (unsigned long)-1);
+ BUG_ON(err);
}
- spin_unlock(&root->fs_info->block_group_cache_lock);
- if (!cache)
- break;
+ cache = btrfs_lookup_first_block_group(root->fs_info, last);
+ while (cache) {
+ if (cache->dirty)
+ break;
+ cache = next_block_group(root, cache);
+ }
+ if (!cache) {
+ if (last == 0)
+ break;
+ last = 0;
+ continue;
+ }
cache->dirty = 0;
- last += cache->key.offset;
+ last = cache->key.objectid + cache->key.offset;
- err = write_one_cache_group(trans, root,
- path, cache);
- /*
- * if we fail to write the cache group, we want
- * to keep it marked dirty in hopes that a later
- * write will work
- */
- if (err) {
- werr = err;
- continue;
- }
+ err = write_one_cache_group(trans, root, path, cache);
+ BUG_ON(err);
+ btrfs_put_block_group(cache);
}
+
btrfs_free_path(path);
- return werr;
+ return 0;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -2485,6 +2599,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->force_alloc = 0;
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
+ atomic_set(&found->caching_threads, 0);
return 0;
}
@@ -2697,7 +2812,7 @@ again:
printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
", %llu bytes_used, %llu bytes_reserved, "
- "%llu bytes_pinned, %llu bytes_readonly, %llu may use"
+ "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
"%llu total\n", (unsigned long long)bytes,
(unsigned long long)data_sinfo->bytes_delalloc,
(unsigned long long)data_sinfo->bytes_used,
@@ -2948,13 +3063,9 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *fs_info = root->fs_info;
- if (pin) {
+ if (pin)
set_extent_dirty(&fs_info->pinned_extents,
bytenr, bytenr + num - 1, GFP_NOFS);
- } else {
- clear_extent_dirty(&fs_info->pinned_extents,
- bytenr, bytenr + num - 1, GFP_NOFS);
- }
while (num > 0) {
cache = btrfs_lookup_block_group(fs_info, bytenr);
@@ -2970,14 +3081,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
spin_unlock(&cache->space_info->lock);
fs_info->total_pinned += len;
} else {
+ int unpin = 0;
+
+ /*
+ * in order to not race with the block group caching, we
+ * only want to unpin the extent if we are cached. If
+ * we aren't cached, we want to start async caching this
+ * block group so we can free the extent the next time
+ * around.
+ */
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
- cache->pinned -= len;
- cache->space_info->bytes_pinned -= len;
+ unpin = (cache->cached == BTRFS_CACHE_FINISHED);
+ if (likely(unpin)) {
+ cache->pinned -= len;
+ cache->space_info->bytes_pinned -= len;
+ fs_info->total_pinned -= len;
+ }
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- fs_info->total_pinned -= len;
- if (cache->cached)
+
+ if (likely(unpin))
+ clear_extent_dirty(&fs_info->pinned_extents,
+ bytenr, bytenr + len -1,
+ GFP_NOFS);
+ else
+ cache_block_group(cache);
+
+ if (unpin)
btrfs_add_free_space(cache, bytenr, len);
}
btrfs_put_block_group(cache);
@@ -3031,6 +3162,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
&start, &end, EXTENT_DIRTY);
if (ret)
break;
+
set_extent_dirty(copy, start, end, GFP_NOFS);
last = end + 1;
}
@@ -3059,6 +3191,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
cond_resched();
}
+
return ret;
}
@@ -3437,6 +3570,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val)
}
/*
+ * when we wait for progress in the block group caching, it's because
+ * our allocation attempt failed at least once. So, we must sleep
+ * and let some progress happen before we try again.
+ *
+ * This function will sleep at least once waiting for new free space to
+ * show up, and then it will check the block group free space numbers
+ * for our min num_bytes. Another option is to have it go ahead
+ * and look in the rbtree for a free extent of a given size, but this
+ * is a good start.
+ */
+static noinline int
+wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
+ u64 num_bytes)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
+
+ if (block_group_cache_done(cache)) {
+ finish_wait(&cache->caching_q, &wait);
+ return 0;
+ }
+ schedule();
+ finish_wait(&cache->caching_q, &wait);
+
+ wait_event(cache->caching_q, block_group_cache_done(cache) ||
+ (cache->free_space >= num_bytes));
+ return 0;
+}
+
+enum btrfs_loop_type {
+ LOOP_CACHED_ONLY = 0,
+ LOOP_CACHING_NOWAIT = 1,
+ LOOP_CACHING_WAIT = 2,
+ LOOP_ALLOC_CHUNK = 3,
+ LOOP_NO_EMPTY_SIZE = 4,
+};
+
+/*
* walks the btree of allocated extents and find a hole of a given size.
* The key ins is changed to record the hole:
* ins->objectid == block start
@@ -3461,6 +3633,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_space_info *space_info;
int last_ptr_loop = 0;
int loop = 0;
+ bool found_uncached_bg = false;
WARN_ON(num_bytes < root->sectorsize);
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -3492,15 +3665,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
search_start = max(search_start, first_logical_byte(root, 0));
search_start = max(search_start, hint_byte);
- if (!last_ptr) {
+ if (!last_ptr)
empty_cluster = 0;
- loop = 1;
- }
if (search_start == hint_byte) {
block_group = btrfs_lookup_block_group(root->fs_info,
search_start);
- if (block_group && block_group_bits(block_group, data)) {
+ /*
+ * we don't want to use the block group if it doesn't match our
+ * allocation bits, or if its not cached.
+ */
+ if (block_group && block_group_bits(block_group, data) &&
+ block_group_cache_done(block_group)) {
down_read(&space_info->groups_sem);
if (list_empty(&block_group->list) ||
block_group->ro) {
@@ -3523,21 +3699,35 @@ search:
down_read(&space_info->groups_sem);
list_for_each_entry(block_group, &space_info->block_groups, list) {
u64 offset;
+ int cached;
atomic_inc(&block_group->count);
search_start = block_group->key.objectid;
have_block_group:
- if (unlikely(!block_group->cached)) {
- mutex_lock(&block_group->cache_mutex);
- ret = cache_block_group(root, block_group);
- mutex_unlock(&block_group->cache_mutex);
- if (ret) {
- btrfs_put_block_group(block_group);
- break;
+ if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+ /*
+ * we want to start caching kthreads, but not too many
+ * right off the bat so we don't overwhelm the system,
+ * so only start them if there are less than 2 and we're
+ * in the initial allocation phase.
+ */
+ if (loop > LOOP_CACHING_NOWAIT ||
+ atomic_read(&space_info->caching_threads) < 2) {
+ ret = cache_block_group(block_group);
+ BUG_ON(ret);
}
}
+ cached = block_group_cache_done(block_group);
+ if (unlikely(!cached)) {
+ found_uncached_bg = true;
+
+ /* if we only want cached bgs, loop */
+ if (loop == LOOP_CACHED_ONLY)
+ goto loop;
+ }
+
if (unlikely(block_group->ro))
goto loop;
@@ -3616,14 +3806,21 @@ refill_cluster:
spin_unlock(&last_ptr->refill_lock);
goto checks;
}
+ } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
+ spin_unlock(&last_ptr->refill_lock);
+
+ wait_block_group_cache_progress(block_group,
+ num_bytes + empty_cluster + empty_size);
+ goto have_block_group;
}
+
/*
* at this point we either didn't find a cluster
* or we weren't able to allocate a block from our
* cluster. Free the cluster we've been trying
* to use, and go to the next block group
*/
- if (loop < 2) {
+ if (loop < LOOP_NO_EMPTY_SIZE) {
btrfs_return_cluster_to_free_space(NULL,
last_ptr);
spin_unlock(&last_ptr->refill_lock);
@@ -3634,11 +3831,17 @@ refill_cluster:
offset = btrfs_find_space_for_alloc(block_group, search_start,
num_bytes, empty_size);
- if (!offset)
+ if (!offset && (cached || (!cached &&
+ loop == LOOP_CACHING_NOWAIT))) {
goto loop;
+ } else if (!offset && (!cached &&
+ loop > LOOP_CACHING_NOWAIT)) {
+ wait_block_group_cache_progress(block_group,
+ num_bytes + empty_size);
+ goto have_block_group;
+ }
checks:
search_start = stripe_align(root, offset);
-
/* move on to the next group */
if (search_start + num_bytes >= search_end) {
btrfs_add_free_space(block_group, offset, num_bytes);
@@ -3684,13 +3887,26 @@ loop:
}
up_read(&space_info->groups_sem);
- /* loop == 0, try to find a clustered alloc in every block group
- * loop == 1, try again after forcing a chunk allocation
- * loop == 2, set empty_size and empty_cluster to 0 and try again
+ /* LOOP_CACHED_ONLY, only search fully cached block groups
+ * LOOP_CACHING_NOWAIT, search partially cached block groups, but
+ * don't wait for them to finish caching
+ * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
+ * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
+ * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
+ * again
*/
- if (!ins->objectid && loop < 3 &&
- (empty_size || empty_cluster || allowed_chunk_alloc)) {
- if (loop >= 2) {
+ if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
+ (found_uncached_bg || empty_size || empty_cluster ||
+ allowed_chunk_alloc)) {
+ if (found_uncached_bg) {
+ found_uncached_bg = false;
+ if (loop < LOOP_CACHING_WAIT) {
+ loop++;
+ goto search;
+ }
+ }
+
+ if (loop == LOOP_ALLOC_CHUNK) {
empty_size = 0;
empty_cluster = 0;
}
@@ -3703,7 +3919,7 @@ loop:
space_info->force_alloc = 1;
}
- if (loop < 3) {
+ if (loop < LOOP_NO_EMPTY_SIZE) {
loop++;
goto search;
}
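
The loop stages named above escalate in order; a minimal sketch of the presumed constant definitions (the real enum is added elsewhere in this patch, so the values here are inferred from the comparisons in this function):

	enum btrfs_loop_type {
		LOOP_CACHED_ONLY = 0,		/* only fully cached block groups */
		LOOP_CACHING_NOWAIT = 1,	/* partially cached groups, don't wait */
		LOOP_CACHING_WAIT = 2,		/* wait for caching progress if needed */
		LOOP_ALLOC_CHUNK = 3,		/* force a chunk allocation and retry */
		LOOP_NO_EMPTY_SIZE = 4,		/* last pass: zero empty_size/empty_cluster */
	};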
@@ -3799,7 +4015,7 @@ again:
num_bytes, data, 1);
goto again;
}
- if (ret) {
+ if (ret == -ENOSPC) {
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(root->fs_info, data);
@@ -3807,7 +4023,6 @@ again:
"wanted %llu\n", (unsigned long long)data,
(unsigned long long)num_bytes);
dump_space_info(sinfo, num_bytes);
- BUG();
}
return ret;
@@ -3845,7 +4060,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
empty_size, hint_byte, search_end, ins,
data);
- update_reserved_extents(root, ins->objectid, ins->offset, 1);
+ if (!ret)
+ update_reserved_extents(root, ins->objectid, ins->offset, 1);
+
return ret;
}
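
With update_reserved_extents() now gated on success, ins is only meaningful when the call returns 0; a hedged caller sketch (variable values hypothetical):

	struct btrfs_key ins;
	int ret;

	ret = btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
				   0, 0, (u64)-1, &ins, data);
	if (ret)	/* typically -ENOSPC; nothing was reserved */
		return ret;
	/* ins.objectid and ins.offset now describe the reserved extent */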
@@ -4007,9 +4224,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group;
block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
- mutex_lock(&block_group->cache_mutex);
- cache_block_group(root, block_group);
- mutex_unlock(&block_group->cache_mutex);
+ cache_block_group(block_group);
+ wait_event(block_group->caching_q,
+ block_group_cache_done(block_group));
ret = btrfs_remove_free_space(block_group, ins->objectid,
ins->offset);
@@ -4040,7 +4257,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
empty_size, hint_byte, search_end,
ins, 0);
- BUG_ON(ret);
+ if (ret)
+ return ret;
if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent == 0)
@@ -4128,6 +4346,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
return buf;
}
+#if 0
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *leaf)
{
@@ -4171,8 +4390,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
return 0;
}
-#if 0
-
static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_leaf_ref *ref)
@@ -4553,262 +4770,471 @@ out:
}
#endif
+struct walk_control {
+ u64 refs[BTRFS_MAX_LEVEL];
+ u64 flags[BTRFS_MAX_LEVEL];
+ struct btrfs_key update_progress;
+ int stage;
+ int level;
+ int shared_level;
+ int update_ref;
+ int keep_locks;
+};
+
+#define DROP_REFERENCE 1
+#define UPDATE_BACKREF 2
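
A condensed sketch of the state machine these two stages drive, simplified from walk_down_proc() and walk_up_proc() below (illustrative only):

	/* walk_down_proc(): a shared block flips the walk into backref mode */
	if (wc->stage == DROP_REFERENCE && wc->update_ref && wc->refs[level] > 1) {
		wc->stage = UPDATE_BACKREF;
		wc->shared_level = level;
	}

	/* walk_up_proc(): climbing back to the shared level flips it back */
	if (wc->stage == UPDATE_BACKREF && level == wc->shared_level) {
		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
	}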
+
/*
- * helper function for drop_subtree, this function is similar to
- * walk_down_tree. The main difference is that it checks reference
- * counts while tree blocks are locked.
+ * helper to process tree block while walking down the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function checks
+ * reference count of the block. if the block is shared and
+ * we need to update back refs for the subtree rooted at the
+ * block, this function changes wc->stage to UPDATE_BACKREF
+ *
+ * when wc->stage == UPDATE_BACKREF, this function updates
+ * back refs for pointers in the block.
+ *
+ * NOTE: return value 1 means we should stop walking down.
*/
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct btrfs_path *path, int *level)
+ struct btrfs_path *path,
+ struct walk_control *wc)
{
- struct extent_buffer *next;
- struct extent_buffer *cur;
- struct extent_buffer *parent;
- u64 bytenr;
- u64 ptr_gen;
- u64 refs;
- u64 flags;
- u32 blocksize;
+ int level = wc->level;
+ struct extent_buffer *eb = path->nodes[level];
+ struct btrfs_key key;
+ u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
int ret;
- cur = path->nodes[*level];
- ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len,
- &refs, &flags);
- BUG_ON(ret);
- if (refs > 1)
- goto out;
+ if (wc->stage == UPDATE_BACKREF &&
+ btrfs_header_owner(eb) != root->root_key.objectid)
+ return 1;
- BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ /*
+ * when reference count of tree block is 1, it won't increase
+ * again. once full backref flag is set, we never clear it.
+ */
+ if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+ (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
+ BUG_ON(!path->locks[level]);
+ ret = btrfs_lookup_extent_info(trans, root,
+ eb->start, eb->len,
+ &wc->refs[level],
+ &wc->flags[level]);
+ BUG_ON(ret);
+ BUG_ON(wc->refs[level] == 0);
+ }
- while (*level >= 0) {
- cur = path->nodes[*level];
- if (*level == 0) {
- ret = btrfs_drop_leaf_ref(trans, root, cur);
- BUG_ON(ret);
- clean_tree_block(trans, root, cur);
- break;
- }
- if (path->slots[*level] >= btrfs_header_nritems(cur)) {
- clean_tree_block(trans, root, cur);
- break;
+ if (wc->stage == DROP_REFERENCE &&
+ wc->update_ref && wc->refs[level] > 1) {
+ BUG_ON(eb == root->node);
+ BUG_ON(path->slots[level] > 0);
+ if (level == 0)
+ btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
+ else
+ btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
+ if (btrfs_header_owner(eb) == root->root_key.objectid &&
+ btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
+ wc->stage = UPDATE_BACKREF;
+ wc->shared_level = level;
}
+ }
- bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
- blocksize = btrfs_level_size(root, *level - 1);
- ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+ if (wc->stage == DROP_REFERENCE) {
+ if (wc->refs[level] > 1)
+ return 1;
- next = read_tree_block(root, bytenr, blocksize, ptr_gen);
- btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ if (path->locks[level] && !wc->keep_locks) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ }
+ return 0;
+ }
- ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
- &refs, &flags);
+ /* wc->stage == UPDATE_BACKREF */
+ if (!(wc->flags[level] & flag)) {
+ BUG_ON(!path->locks[level]);
+ ret = btrfs_inc_ref(trans, root, eb, 1);
BUG_ON(ret);
- if (refs > 1) {
- parent = path->nodes[*level];
- ret = btrfs_free_extent(trans, root, bytenr,
- blocksize, parent->start,
- btrfs_header_owner(parent),
- *level - 1, 0);
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+ BUG_ON(ret);
+ ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+ eb->len, flag, 0);
+ BUG_ON(ret);
+ wc->flags[level] |= flag;
+ }
+
+ /*
+ * the block is shared by multiple trees, so it's not good to
+ * keep the tree lock
+ */
+ if (path->locks[level] && level > 0) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ }
+ return 0;
+}
+
+/*
+ * helper to process tree block while walking up the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function drops
+ * reference count on the block.
+ *
+ * when wc->stage == UPDATE_BACKREF, this function changes
+ * wc->stage back to DROP_REFERENCE if we changed wc->stage
+ * to UPDATE_BACKREF previously while processing the block.
+ *
+ * NOTE: return value 1 means we should stop walking up.
+ */
+static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc)
+{
+ int ret = 0;
+ int level = wc->level;
+ struct extent_buffer *eb = path->nodes[level];
+ u64 parent = 0;
+
+ if (wc->stage == UPDATE_BACKREF) {
+ BUG_ON(wc->shared_level < level);
+ if (level < wc->shared_level)
+ goto out;
+
+ BUG_ON(wc->refs[level] <= 1);
+ ret = find_next_key(path, level + 1, &wc->update_progress);
+ if (ret > 0)
+ wc->update_ref = 0;
+
+ wc->stage = DROP_REFERENCE;
+ wc->shared_level = -1;
+ path->slots[level] = 0;
+
+ /*
+ * check reference count again if the block isn't locked.
+ * we should start walking down the tree again if reference
+ * count is one.
+ */
+ if (!path->locks[level]) {
+ BUG_ON(level == 0);
+ btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
+ path->locks[level] = 1;
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ eb->start, eb->len,
+ &wc->refs[level],
+ &wc->flags[level]);
BUG_ON(ret);
- path->slots[*level]++;
- btrfs_tree_unlock(next);
- free_extent_buffer(next);
- continue;
+ BUG_ON(wc->refs[level] == 0);
+ if (wc->refs[level] == 1) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ return 1;
+ }
+ } else {
+ BUG_ON(level != 0);
}
+ }
- BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ /* wc->stage == DROP_REFERENCE */
+ BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
- *level = btrfs_header_level(next);
- path->nodes[*level] = next;
- path->slots[*level] = 0;
- path->locks[*level] = 1;
- cond_resched();
+ if (wc->refs[level] == 1) {
+ if (level == 0) {
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ ret = btrfs_dec_ref(trans, root, eb, 1);
+ else
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+ BUG_ON(ret);
+ }
+ /* make block locked assertion in clean_tree_block happy */
+ if (!path->locks[level] &&
+ btrfs_header_generation(eb) == trans->transid) {
+ btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
+ path->locks[level] = 1;
+ }
+ clean_tree_block(trans, root, eb);
}
-out:
- if (path->nodes[*level] == root->node)
- parent = path->nodes[*level];
- else
- parent = path->nodes[*level + 1];
- bytenr = path->nodes[*level]->start;
- blocksize = path->nodes[*level]->len;
- ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start,
- btrfs_header_owner(parent), *level, 0);
+ if (eb == root->node) {
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ parent = eb->start;
+ else
+ BUG_ON(root->root_key.objectid !=
+ btrfs_header_owner(eb));
+ } else {
+ if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ parent = path->nodes[level + 1]->start;
+ else
+ BUG_ON(root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level + 1]));
+ }
+
+ ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
+ root->root_key.objectid, level, 0);
BUG_ON(ret);
+out:
+ wc->refs[level] = 0;
+ wc->flags[level] = 0;
+ return ret;
+}
+
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc)
+{
+ struct extent_buffer *next;
+ struct extent_buffer *cur;
+ u64 bytenr;
+ u64 ptr_gen;
+ u32 blocksize;
+ int level = wc->level;
+ int ret;
+
+ while (level >= 0) {
+ cur = path->nodes[level];
+ BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
+
+ ret = walk_down_proc(trans, root, path, wc);
+ if (ret > 0)
+ break;
+
+ if (level == 0)
+ break;
+
+ bytenr = btrfs_node_blockptr(cur, path->slots[level]);
+ blocksize = btrfs_level_size(root, level - 1);
+ ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
+
+ next = read_tree_block(root, bytenr, blocksize, ptr_gen);
+ btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
- if (path->locks[*level]) {
- btrfs_tree_unlock(path->nodes[*level]);
- path->locks[*level] = 0;
+ level--;
+ BUG_ON(level != btrfs_header_level(next));
+ path->nodes[level] = next;
+ path->slots[level] = 0;
+ path->locks[level] = 1;
+ wc->level = level;
}
- free_extent_buffer(path->nodes[*level]);
- path->nodes[*level] = NULL;
- *level += 1;
- cond_resched();
return 0;
}
-/*
- * helper for dropping snapshots. This walks back up the tree in the path
- * to find the first node higher up where we haven't yet gone through
- * all the slots
- */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- int *level, int max_level)
+ struct walk_control *wc, int max_level)
{
- struct btrfs_root_item *root_item = &root->root_item;
- int i;
- int slot;
+ int level = wc->level;
int ret;
- for (i = *level; i < max_level && path->nodes[i]; i++) {
- slot = path->slots[i];
- if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
- /*
- * there is more work to do in this level.
- * Update the drop_progress marker to reflect
- * the work we've done so far, and then bump
- * the slot number
- */
- path->slots[i]++;
- WARN_ON(*level == 0);
- if (max_level == BTRFS_MAX_LEVEL) {
- btrfs_node_key(path->nodes[i],
- &root_item->drop_progress,
- path->slots[i]);
- root_item->drop_level = i;
- }
- *level = i;
+ path->slots[level] = btrfs_header_nritems(path->nodes[level]);
+ while (level < max_level && path->nodes[level]) {
+ wc->level = level;
+ if (path->slots[level] + 1 <
+ btrfs_header_nritems(path->nodes[level])) {
+ path->slots[level]++;
return 0;
} else {
- struct extent_buffer *parent;
-
- /*
- * this whole node is done, free our reference
- * on it and go up one level
- */
- if (path->nodes[*level] == root->node)
- parent = path->nodes[*level];
- else
- parent = path->nodes[*level + 1];
+ ret = walk_up_proc(trans, root, path, wc);
+ if (ret > 0)
+ return 0;
- clean_tree_block(trans, root, path->nodes[i]);
- ret = btrfs_free_extent(trans, root,
- path->nodes[i]->start,
- path->nodes[i]->len,
- parent->start,
- btrfs_header_owner(parent),
- *level, 0);
- BUG_ON(ret);
- if (path->locks[*level]) {
- btrfs_tree_unlock(path->nodes[i]);
- path->locks[i] = 0;
+ if (path->locks[level]) {
+ btrfs_tree_unlock(path->nodes[level]);
+ path->locks[level] = 0;
}
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- *level = i + 1;
+ free_extent_buffer(path->nodes[level]);
+ path->nodes[level] = NULL;
+ level++;
}
}
return 1;
}
/*
- * drop the reference count on the tree rooted at 'snap'. This traverses
- * the tree freeing any blocks that have a ref count of zero after being
- * decremented.
+ * drop a subvolume tree.
+ *
+ * this function traverses the tree freeing any blocks that are only
+ * referenced by the tree.
+ *
+ * when a shared tree block is found, this function decreases its
+ * reference count by one. if update_ref is true, this function
+ * also makes sure backrefs for the shared block and all lower level
+ * blocks are properly updated.
*/
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
- *root)
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
{
- int ret = 0;
- int wret;
- int level;
struct btrfs_path *path;
- int update_count;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *tree_root = root->fs_info->tree_root;
struct btrfs_root_item *root_item = &root->root_item;
+ struct walk_control *wc;
+ struct btrfs_key key;
+ int err = 0;
+ int ret;
+ int level;
path = btrfs_alloc_path();
BUG_ON(!path);
- level = btrfs_header_level(root->node);
+ wc = kzalloc(sizeof(*wc), GFP_NOFS);
+ BUG_ON(!wc);
+
+ trans = btrfs_start_transaction(tree_root, 1);
+
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
+ level = btrfs_header_level(root->node);
path->nodes[level] = btrfs_lock_root_node(root);
btrfs_set_lock_blocking(path->nodes[level]);
path->slots[level] = 0;
path->locks[level] = 1;
+ memset(&wc->update_progress, 0,
+ sizeof(wc->update_progress));
} else {
- struct btrfs_key key;
- struct btrfs_disk_key found_key;
- struct extent_buffer *node;
-
btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
+ memcpy(&wc->update_progress, &key,
+ sizeof(wc->update_progress));
+
level = root_item->drop_level;
+ BUG_ON(level == 0);
path->lowest_level = level;
- wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (wret < 0) {
- ret = wret;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ path->lowest_level = 0;
+ if (ret < 0) {
+ err = ret;
goto out;
}
- node = path->nodes[level];
- btrfs_node_key(node, &found_key, path->slots[level]);
- WARN_ON(memcmp(&found_key, &root_item->drop_progress,
- sizeof(found_key)));
+ btrfs_node_key_to_cpu(path->nodes[level], &key,
+ path->slots[level]);
+ WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
+
/*
* unlock our path, this is safe because only this
* function is allowed to delete this snapshot
*/
btrfs_unlock_up_safe(path, 0);
+
+ level = btrfs_header_level(root->node);
+ while (1) {
+ btrfs_tree_lock(path->nodes[level]);
+ btrfs_set_lock_blocking(path->nodes[level]);
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ path->nodes[level]->start,
+ path->nodes[level]->len,
+ &wc->refs[level],
+ &wc->flags[level]);
+ BUG_ON(ret);
+ BUG_ON(wc->refs[level] == 0);
+
+ if (level == root_item->drop_level)
+ break;
+
+ btrfs_tree_unlock(path->nodes[level]);
+ WARN_ON(wc->refs[level] != 1);
+ level--;
+ }
}
+
+ wc->level = level;
+ wc->shared_level = -1;
+ wc->stage = DROP_REFERENCE;
+ wc->update_ref = update_ref;
+ wc->keep_locks = 0;
+
while (1) {
- unsigned long update;
- wret = walk_down_tree(trans, root, path, &level);
- if (wret > 0)
+ ret = walk_down_tree(trans, root, path, wc);
+ if (ret < 0) {
+ err = ret;
break;
- if (wret < 0)
- ret = wret;
+ }
- wret = walk_up_tree(trans, root, path, &level,
- BTRFS_MAX_LEVEL);
- if (wret > 0)
+ ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
+ if (ret < 0) {
+ err = ret;
break;
- if (wret < 0)
- ret = wret;
- if (trans->transaction->in_commit ||
- trans->transaction->delayed_refs.flushing) {
- ret = -EAGAIN;
+ }
+
+ if (ret > 0) {
+ BUG_ON(wc->stage != DROP_REFERENCE);
break;
}
- for (update_count = 0; update_count < 16; update_count++) {
+
+ if (wc->stage == DROP_REFERENCE) {
+ level = wc->level;
+ btrfs_node_key(path->nodes[level],
+ &root_item->drop_progress,
+ path->slots[level]);
+ root_item->drop_level = level;
+ }
+
+ BUG_ON(wc->level == 0);
+ if (trans->transaction->in_commit ||
+ trans->transaction->delayed_refs.flushing) {
+ ret = btrfs_update_root(trans, tree_root,
+ &root->root_key,
+ root_item);
+ BUG_ON(ret);
+
+ btrfs_end_transaction(trans, tree_root);
+ trans = btrfs_start_transaction(tree_root, 1);
+ } else {
+ unsigned long update;
update = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (update)
- btrfs_run_delayed_refs(trans, root, update);
- else
- break;
+ btrfs_run_delayed_refs(trans, tree_root,
+ update);
}
}
+ btrfs_release_path(root, path);
+ BUG_ON(err);
+
+ ret = btrfs_del_root(trans, tree_root, &root->root_key);
+ BUG_ON(ret);
+
+ free_extent_buffer(root->node);
+ free_extent_buffer(root->commit_root);
+ kfree(root);
out:
+ btrfs_end_transaction(trans, tree_root);
+ kfree(wc);
btrfs_free_path(path);
- return ret;
+ return err;
}
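
Because the function now opens and commits its own transactions against the tree root, callers just hand in the subvolume root; a hypothetical call site sketch:

	/* drop the dead subvolume; 1 = also fix backrefs on shared blocks */
	ret = btrfs_drop_snapshot(snap_root, 1);

Restarting the transaction whenever a commit is pending (the in_commit/flushing check above) is what lets a long-running drop make progress without stalling commits.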
+/*
+ * drop subtree rooted at tree block 'node'.
+ *
+ * NOTE: this function will unlock and release tree block 'node'
+ */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
struct extent_buffer *parent)
{
struct btrfs_path *path;
+ struct walk_control *wc;
int level;
int parent_level;
int ret = 0;
int wret;
+ BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+
path = btrfs_alloc_path();
BUG_ON(!path);
+ wc = kzalloc(sizeof(*wc), GFP_NOFS);
+ BUG_ON(!wc);
+
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
extent_buffer_get(parent);
@@ -4817,24 +5243,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
btrfs_assert_tree_locked(node);
level = btrfs_header_level(node);
- extent_buffer_get(node);
path->nodes[level] = node;
path->slots[level] = 0;
+ path->locks[level] = 1;
+
+ wc->refs[parent_level] = 1;
+ wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ wc->level = level;
+ wc->shared_level = -1;
+ wc->stage = DROP_REFERENCE;
+ wc->update_ref = 0;
+ wc->keep_locks = 1;
while (1) {
- wret = walk_down_tree(trans, root, path, &level);
- if (wret < 0)
+ wret = walk_down_tree(trans, root, path, wc);
+ if (wret < 0) {
ret = wret;
- if (wret != 0)
break;
+ }
- wret = walk_up_tree(trans, root, path, &level, parent_level);
+ wret = walk_up_tree(trans, root, path, wc, parent_level);
if (wret < 0)
ret = wret;
if (wret != 0)
break;
}
+ kfree(wc);
btrfs_free_path(path);
return ret;
}
@@ -6739,11 +7174,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
&info->block_group_cache_tree);
spin_unlock(&info->block_group_cache_lock);
- btrfs_remove_free_space_cache(block_group);
down_write(&block_group->space_info->groups_sem);
list_del(&block_group->list);
up_write(&block_group->space_info->groups_sem);
+ if (block_group->cached == BTRFS_CACHE_STARTED)
+ wait_event(block_group->caching_q,
+ block_group_cache_done(block_group));
+
+ btrfs_remove_free_space_cache(block_group);
+
WARN_ON(atomic_read(&block_group->count) != 1);
kfree(block_group);
@@ -6809,9 +7249,19 @@ int btrfs_read_block_groups(struct btrfs_root *root)
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
spin_lock_init(&cache->tree_lock);
- mutex_init(&cache->cache_mutex);
+ cache->fs_info = info;
+ init_waitqueue_head(&cache->caching_q);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
+
+ /*
+ * we only want to have 32k of ram per block group for keeping
+ * track of free space, and if we pass 1/2 of that we want to
+ * start converting things over to using bitmaps
+ */
+ cache->extents_thresh = ((1024 * 32) / 2) /
+ sizeof(struct btrfs_free_space);
+
read_extent_buffer(leaf, &cache->item,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(cache->item));
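
As a worked example of that threshold, a standalone sketch (the entry size is an assumption, roughly what struct btrfs_free_space comes to with the new bitmap fields on 64-bit):

	#include <stdio.h>

	int main(void)
	{
		unsigned long entry_size = 48;	/* assumed sizeof(struct btrfs_free_space) */
		unsigned long thresh = ((1024 * 32) / 2) / entry_size;

		/* ~341 extent entries before tracking starts converting to bitmaps */
		printf("extents_thresh = %lu\n", thresh);
		return 0;
	}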
@@ -6820,6 +7270,26 @@ int btrfs_read_block_groups(struct btrfs_root *root)
key.objectid = found_key.objectid + found_key.offset;
btrfs_release_path(root, path);
cache->flags = btrfs_block_group_flags(&cache->item);
+ cache->sectorsize = root->sectorsize;
+
+ remove_sb_from_cache(root, cache);
+
+ /*
+ * check for two cases, either we are full, and therefore
+ * don't need to bother with the caching work since we won't
+ * find any space, or we are empty, and we can just add all
+	 * the space in and be done with it. This saves us a lot of
+ * time, particularly in the full case.
+ */
+ if (found_key.offset == btrfs_block_group_used(&cache->item)) {
+ cache->cached = BTRFS_CACHE_FINISHED;
+ } else if (btrfs_block_group_used(&cache->item) == 0) {
+ cache->cached = BTRFS_CACHE_FINISHED;
+ add_new_free_space(cache, root->fs_info,
+ found_key.objectid,
+ found_key.objectid +
+ found_key.offset);
+ }
ret = update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item),
@@ -6863,10 +7333,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->key.objectid = chunk_offset;
cache->key.offset = size;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ cache->sectorsize = root->sectorsize;
+
+ /*
+ * we only want to have 32k of ram per block group for keeping track
+ * of free space, and if we pass 1/2 of that we want to start
+ * converting things over to using bitmaps
+ */
+ cache->extents_thresh = ((1024 * 32) / 2) /
+ sizeof(struct btrfs_free_space);
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
spin_lock_init(&cache->tree_lock);
- mutex_init(&cache->cache_mutex);
+ init_waitqueue_head(&cache->caching_q);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
@@ -6875,6 +7354,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->flags = type;
btrfs_set_block_group_flags(&cache->item, type);
+ cache->cached = BTRFS_CACHE_FINISHED;
+ remove_sb_from_cache(root, cache);
+
+ add_new_free_space(cache, root->fs_info, chunk_offset,
+ chunk_offset + size);
+
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
&cache->space_info);
BUG_ON(ret);
@@ -6933,7 +7418,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
rb_erase(&block_group->cache_node,
&root->fs_info->block_group_cache_tree);
spin_unlock(&root->fs_info->block_group_cache_lock);
- btrfs_remove_free_space_cache(block_group);
+
down_write(&block_group->space_info->groups_sem);
/*
* we must use list_del_init so people can check to see if they
@@ -6942,11 +7427,18 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
list_del_init(&block_group->list);
up_write(&block_group->space_info->groups_sem);
+ if (block_group->cached == BTRFS_CACHE_STARTED)
+ wait_event(block_group->caching_q,
+ block_group_cache_done(block_group));
+
+ btrfs_remove_free_space_cache(block_group);
+
spin_lock(&block_group->space_info->lock);
block_group->space_info->total_bytes -= block_group->key.offset;
block_group->space_info->bytes_readonly -= block_group->key.offset;
spin_unlock(&block_group->space_info->lock);
- block_group->space_info->full = 0;
+
+ btrfs_clear_space_info_full(root->fs_info);
btrfs_put_block_group(block_group);
btrfs_put_block_group(block_group);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 126477e..4b83397 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -22,7 +22,6 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
@@ -151,7 +150,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
}
if (end_pos > isize) {
i_size_write(inode, end_pos);
- btrfs_update_inode(trans, root, inode);
+ /* we've only changed i_size in ram, and we haven't updated
+ * the disk i_size. There is no need to log the inode
+ * at this time.
+ */
}
err = btrfs_end_transaction(trans, root);
out_unlock:
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4538e48..5edcee3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -16,45 +16,46 @@
* Boston, MA 021110-1307, USA.
*/
+#include <linux/pagemap.h>
#include <linux/sched.h>
+#include <linux/math64.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
-struct btrfs_free_space {
- struct rb_node bytes_index;
- struct rb_node offset_index;
- u64 offset;
- u64 bytes;
-};
+#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
-static int tree_insert_offset(struct rb_root *root, u64 offset,
- struct rb_node *node)
+static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+ u64 offset)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct btrfs_free_space *info;
+ BUG_ON(offset < bitmap_start);
+ offset -= bitmap_start;
+ return (unsigned long)(div64_u64(offset, sectorsize));
+}
- while (*p) {
- parent = *p;
- info = rb_entry(parent, struct btrfs_free_space, offset_index);
+static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+{
+ return (unsigned long)(div64_u64(bytes, sectorsize));
+}
- if (offset < info->offset)
- p = &(*p)->rb_left;
- else if (offset > info->offset)
- p = &(*p)->rb_right;
- else
- return -EEXIST;
- }
+static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+ u64 offset)
+{
+ u64 bitmap_start;
+ u64 bytes_per_bitmap;
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
+ bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
+ bitmap_start = offset - block_group->key.objectid;
+ bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
+ bitmap_start *= bytes_per_bitmap;
+ bitmap_start += block_group->key.objectid;
- return 0;
+ return bitmap_start;
}
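
To make the helpers above concrete, a standalone sketch computing which bitmap covers an offset (assumes 4KiB pages and a 4KiB sectorsize, so each bitmap spans 32768 bits * 4KiB = 128MiB):

	#include <stdio.h>
	#include <stdint.h>

	#define BITS_PER_BITMAP (4096 * 8)	/* PAGE_CACHE_SIZE * 8, pages assumed 4KiB */

	int main(void)
	{
		uint64_t sectorsize = 4096;
		uint64_t bg_start = 1024ULL * 1024 * 1024;	/* hypothetical group start */
		uint64_t offset = bg_start + 200ULL * 1024 * 1024;
		uint64_t bytes_per_bitmap = (uint64_t)BITS_PER_BITMAP * sectorsize;
		uint64_t bitmap_start = (offset - bg_start) / bytes_per_bitmap *
					bytes_per_bitmap + bg_start;

		/* 200MiB into the group lands in the second 128MiB bitmap */
		printf("bitmap_start = %llu\n", (unsigned long long)bitmap_start);
		return 0;
	}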
-static int tree_insert_bytes(struct rb_root *root, u64 bytes,
- struct rb_node *node)
+static int tree_insert_offset(struct rb_root *root, u64 offset,
+ struct rb_node *node, int bitmap)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
while (*p) {
parent = *p;
- info = rb_entry(parent, struct btrfs_free_space, bytes_index);
+ info = rb_entry(parent, struct btrfs_free_space, offset_index);
- if (bytes < info->bytes)
+ if (offset < info->offset) {
p = &(*p)->rb_left;
- else
+ } else if (offset > info->offset) {
p = &(*p)->rb_right;
+ } else {
+ /*
+ * we could have a bitmap entry and an extent entry
+ * share the same offset. If this is the case, we want
+ * the extent entry to always be found first if we do a
+ * linear search through the tree, since we want to have
+ * the quickest allocation time, and allocating from an
+ * extent is faster than allocating from a bitmap. So
+ * if we're inserting a bitmap and we find an entry at
+ * this offset, we want to go right, or after this entry
+ * logically. If we are inserting an extent and we've
+ * found a bitmap, we want to go left, or before
+ * logically.
+ */
+ if (bitmap) {
+ WARN_ON(info->bitmap);
+ p = &(*p)->rb_right;
+ } else {
+ WARN_ON(!info->bitmap);
+ p = &(*p)->rb_left;
+ }
+ }
}
rb_link_node(node, parent, p);
@@ -79,110 +102,143 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
/*
* searches the tree for the given offset.
*
- * fuzzy == 1: this is used for allocations where we are given a hint of where
- * to look for free space. Because the hint may not be completely on an offset
- * mark, or the hint may no longer point to free space we need to fudge our
- * results a bit. So we look for free space starting at or after offset with at
- * least bytes size. We prefer to find as close to the given offset as we can.
- * Also if the offset is within a free space range, then we will return the free
- * space that contains the given offset, which means we can return a free space
- * chunk with an offset before the provided offset.
- *
- * fuzzy == 0: this is just a normal tree search. Give us the free space that
- * starts at the given offset which is at least bytes size, and if its not there
- * return NULL.
+ * fuzzy - If this is set, then we are trying to make an allocation, and we just
+ * want a section that has at least bytes size and comes at or after the given
+ * offset.
*/
-static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
- u64 offset, u64 bytes,
- int fuzzy)
+static struct btrfs_free_space *
+tree_search_offset(struct btrfs_block_group_cache *block_group,
+ u64 offset, int bitmap_only, int fuzzy)
{
- struct rb_node *n = root->rb_node;
- struct btrfs_free_space *entry, *ret = NULL;
+ struct rb_node *n = block_group->free_space_offset.rb_node;
+ struct btrfs_free_space *entry, *prev = NULL;
+
+ /* find entry that is closest to the 'offset' */
+ while (1) {
+ if (!n) {
+ entry = NULL;
+ break;
+ }
- while (n) {
entry = rb_entry(n, struct btrfs_free_space, offset_index);
+ prev = entry;
- if (offset < entry->offset) {
- if (fuzzy &&
- (!ret || entry->offset < ret->offset) &&
- (bytes <= entry->bytes))
- ret = entry;
+ if (offset < entry->offset)
n = n->rb_left;
- } else if (offset > entry->offset) {
- if (fuzzy &&
- (entry->offset + entry->bytes - 1) >= offset &&
- bytes <= entry->bytes) {
- ret = entry;
- break;
- }
+ else if (offset > entry->offset)
n = n->rb_right;
- } else {
- if (bytes > entry->bytes) {
- n = n->rb_right;
- continue;
- }
- ret = entry;
+ else
break;
- }
}
- return ret;
-}
+ if (bitmap_only) {
+ if (!entry)
+ return NULL;
+ if (entry->bitmap)
+ return entry;
-/*
- * return a chunk at least bytes size, as close to offset that we can get.
- */
-static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
- u64 offset, u64 bytes)
-{
- struct rb_node *n = root->rb_node;
- struct btrfs_free_space *entry, *ret = NULL;
-
- while (n) {
- entry = rb_entry(n, struct btrfs_free_space, bytes_index);
+ /*
+ * bitmap entry and extent entry may share same offset,
+ * in that case, bitmap entry comes after extent entry.
+ */
+ n = rb_next(n);
+ if (!n)
+ return NULL;
+ entry = rb_entry(n, struct btrfs_free_space, offset_index);
+ if (entry->offset != offset)
+ return NULL;
- if (bytes < entry->bytes) {
+ WARN_ON(!entry->bitmap);
+ return entry;
+ } else if (entry) {
+ if (entry->bitmap) {
/*
- * We prefer to get a hole size as close to the size we
- * are asking for so we don't take small slivers out of
- * huge holes, but we also want to get as close to the
- * offset as possible so we don't have a whole lot of
- * fragmentation.
+ * if previous extent entry covers the offset,
+ * we should return it instead of the bitmap entry
*/
- if (offset <= entry->offset) {
- if (!ret)
- ret = entry;
- else if (entry->bytes < ret->bytes)
- ret = entry;
- else if (entry->offset < ret->offset)
- ret = entry;
+ n = &entry->offset_index;
+ while (1) {
+ n = rb_prev(n);
+ if (!n)
+ break;
+ prev = rb_entry(n, struct btrfs_free_space,
+ offset_index);
+ if (!prev->bitmap) {
+ if (prev->offset + prev->bytes > offset)
+ entry = prev;
+ break;
+ }
}
- n = n->rb_left;
- } else if (bytes > entry->bytes) {
- n = n->rb_right;
+ }
+ return entry;
+ }
+
+ if (!prev)
+ return NULL;
+
+ /* find last entry before the 'offset' */
+ entry = prev;
+ if (entry->offset > offset) {
+ n = rb_prev(&entry->offset_index);
+ if (n) {
+ entry = rb_entry(n, struct btrfs_free_space,
+ offset_index);
+ BUG_ON(entry->offset > offset);
} else {
- /*
- * Ok we may have multiple chunks of the wanted size,
- * so we don't want to take the first one we find, we
- * want to take the one closest to our given offset, so
- * keep searching just in case theres a better match.
- */
- n = n->rb_right;
- if (offset > entry->offset)
- continue;
- else if (!ret || entry->offset < ret->offset)
- ret = entry;
+ if (fuzzy)
+ return entry;
+ else
+ return NULL;
}
}
- return ret;
+ if (entry->bitmap) {
+ n = &entry->offset_index;
+ while (1) {
+ n = rb_prev(n);
+ if (!n)
+ break;
+ prev = rb_entry(n, struct btrfs_free_space,
+ offset_index);
+ if (!prev->bitmap) {
+ if (prev->offset + prev->bytes > offset)
+ return prev;
+ break;
+ }
+ }
+ if (entry->offset + BITS_PER_BITMAP *
+ block_group->sectorsize > offset)
+ return entry;
+ } else if (entry->offset + entry->bytes > offset)
+ return entry;
+
+ if (!fuzzy)
+ return NULL;
+
+ while (1) {
+ if (entry->bitmap) {
+ if (entry->offset + BITS_PER_BITMAP *
+ block_group->sectorsize > offset)
+ break;
+ } else {
+ if (entry->offset + entry->bytes > offset)
+ break;
+ }
+
+ n = rb_next(&entry->offset_index);
+ if (!n)
+ return NULL;
+ entry = rb_entry(n, struct btrfs_free_space, offset_index);
+ }
+ return entry;
}
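
The two flags select distinct lookup modes; illustrative calls (a sketch, 'bg' and 'offset' hypothetical):

	/*
	 * exact lookup, extent entry preferred when one covers the offset:
	 *	entry = tree_search_offset(bg, offset, 0, 0);
	 *
	 * only the bitmap entry that starts exactly at 'offset', else NULL:
	 *	entry = tree_search_offset(bg, offset, 1, 0);
	 *
	 * fuzzy lookup for allocation: first entry whose space reaches past
	 * 'offset', falling forward through the tree if necessary:
	 *	entry = tree_search_offset(bg, offset, 0, 1);
	 */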
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *info)
{
rb_erase(&info->offset_index, &block_group->free_space_offset);
- rb_erase(&info->bytes_index, &block_group->free_space_bytes);
+ block_group->free_extents--;
+ block_group->free_space -= info->bytes;
}
static int link_free_space(struct btrfs_block_group_cache *block_group,
@@ -190,17 +246,353 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
{
int ret = 0;
-
- BUG_ON(!info->bytes);
+ BUG_ON(!info->bitmap && !info->bytes);
ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
- &info->offset_index);
+ &info->offset_index, (info->bitmap != NULL));
if (ret)
return ret;
- ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
- &info->bytes_index);
- if (ret)
- return ret;
+ block_group->free_space += info->bytes;
+ block_group->free_extents++;
+ return ret;
+}
+
+static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+{
+ u64 max_bytes, possible_bytes;
+
+ /*
+	 * The goal is to keep the total amount of memory used per 1GB of space
+	 * at or below 32k, so we need to adjust how much memory we allow to be
+	 * used by extent-based free space tracking
+ */
+ max_bytes = MAX_CACHE_BYTES_PER_GIG *
+ (div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+
+ possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) +
+ (sizeof(struct btrfs_free_space) *
+ block_group->extents_thresh);
+
+ if (possible_bytes > max_bytes) {
+ int extent_bytes = max_bytes -
+ (block_group->total_bitmaps * PAGE_CACHE_SIZE);
+
+ if (extent_bytes <= 0) {
+ block_group->extents_thresh = 0;
+ return;
+ }
+
+ block_group->extents_thresh = extent_bytes /
+ (sizeof(struct btrfs_free_space));
+ }
+}
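
A worked example of the budget math above, as a standalone sketch (page and entry sizes are assumptions):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bg_bytes = 1ULL << 30;	/* 1GiB block group */
		unsigned long max_bytes = 32 * 1024 * (unsigned long)(bg_bytes >> 30);
		unsigned long bitmaps = 1, page = 4096, entry = 48;
		unsigned long extent_bytes = max_bytes - bitmaps * page;

		/* one 4KiB bitmap leaves ~28KiB, i.e. ~597 extent entries */
		printf("extents_thresh = %lu\n", extent_bytes / entry);
		return 0;
	}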
+
+static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes)
+{
+ unsigned long start, end;
+ unsigned long i;
+
+ start = offset_to_bit(info->offset, block_group->sectorsize, offset);
+ end = start + bytes_to_bits(bytes, block_group->sectorsize);
+ BUG_ON(end > BITS_PER_BITMAP);
+
+ for (i = start; i < end; i++)
+ clear_bit(i, info->bitmap);
+
+ info->bytes -= bytes;
+ block_group->free_space -= bytes;
+}
+
+static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes)
+{
+ unsigned long start, end;
+ unsigned long i;
+
+ start = offset_to_bit(info->offset, block_group->sectorsize, offset);
+ end = start + bytes_to_bits(bytes, block_group->sectorsize);
+ BUG_ON(end > BITS_PER_BITMAP);
+
+ for (i = start; i < end; i++)
+ set_bit(i, info->bitmap);
+
+ info->bytes += bytes;
+ block_group->free_space += bytes;
+}
+
+static int search_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_space *bitmap_info, u64 *offset,
+ u64 *bytes)
+{
+ unsigned long found_bits = 0;
+ unsigned long bits, i;
+ unsigned long next_zero;
+
+ i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+ max_t(u64, *offset, bitmap_info->offset));
+ bits = bytes_to_bits(*bytes, block_group->sectorsize);
+
+ for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
+ i < BITS_PER_BITMAP;
+ i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
+ next_zero = find_next_zero_bit(bitmap_info->bitmap,
+ BITS_PER_BITMAP, i);
+ if ((next_zero - i) >= bits) {
+ found_bits = next_zero - i;
+ break;
+ }
+ i = next_zero;
+ }
+
+ if (found_bits) {
+ *offset = (u64)(i * block_group->sectorsize) +
+ bitmap_info->offset;
+ *bytes = (u64)(found_bits) * block_group->sectorsize;
+ return 0;
+ }
+
+ return -1;
+}
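
search_bitmap() looks for the first run of set bits at least *bytes long; a userspace model of the same scan (a sketch, bitmap contents hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned char map[] = { 0x0f, 0xf0, 0xff };	/* bits 0-3, 12-23 set */
		int nbits = 24, want = 8, run = 0, start = -1, i;

		for (i = 0; i < nbits; i++) {
			if (map[i / 8] & (1 << (i % 8))) {
				if (!run)
					start = i;
				if (++run >= want)
					break;
			} else {
				run = 0;
			}
		}
		if (run >= want)	/* prints: found 8 bits at bit 12 */
			printf("found %d bits at bit %d\n", run, start);
		return 0;
	}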
+
+static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
+ *block_group, u64 *offset,
+ u64 *bytes, int debug)
+{
+ struct btrfs_free_space *entry;
+ struct rb_node *node;
+ int ret;
+
+ if (!block_group->free_space_offset.rb_node)
+ return NULL;
+
+ entry = tree_search_offset(block_group,
+ offset_to_bitmap(block_group, *offset),
+ 0, 1);
+ if (!entry)
+ return NULL;
+
+ for (node = &entry->offset_index; node; node = rb_next(node)) {
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ if (entry->bytes < *bytes)
+ continue;
+
+ if (entry->bitmap) {
+ ret = search_bitmap(block_group, entry, offset, bytes);
+ if (!ret)
+ return entry;
+ continue;
+ }
+
+ *offset = entry->offset;
+ *bytes = entry->bytes;
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_space *info, u64 offset)
+{
+ u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+ int max_bitmaps = (int)div64_u64(block_group->key.offset +
+ bytes_per_bg - 1, bytes_per_bg);
+ BUG_ON(block_group->total_bitmaps >= max_bitmaps);
+
+ info->offset = offset_to_bitmap(block_group, offset);
+ link_free_space(block_group, info);
+ block_group->total_bitmaps++;
+
+ recalculate_thresholds(block_group);
+}
+
+static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_space *bitmap_info,
+ u64 *offset, u64 *bytes)
+{
+ u64 end;
+ u64 search_start, search_bytes;
+ int ret;
+
+again:
+ end = bitmap_info->offset +
+ (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+
+ /*
+ * XXX - this can go away after a few releases.
+ *
+ * since the only user of btrfs_remove_free_space is the tree logging
+ * stuff, and the only way to test that is under crash conditions, we
+	 * want to have this debug stuff here just in case something's not
+	 * working. Search the bitmap for the space we are trying to use to
+	 * make sure it's actually there. If it's not there then we need to stop
+ * because something has gone wrong.
+ */
+ search_start = *offset;
+ search_bytes = *bytes;
+ ret = search_bitmap(block_group, bitmap_info, &search_start,
+ &search_bytes);
+ BUG_ON(ret < 0 || search_start != *offset);
+
+ if (*offset > bitmap_info->offset && *offset + *bytes > end) {
+ bitmap_clear_bits(block_group, bitmap_info, *offset,
+ end - *offset + 1);
+ *bytes -= end - *offset + 1;
+ *offset = end + 1;
+ } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
+ bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+ *bytes = 0;
+ }
+
+ if (*bytes) {
+ struct rb_node *next = rb_next(&bitmap_info->offset_index);
+ if (!bitmap_info->bytes) {
+ unlink_free_space(block_group, bitmap_info);
+ kfree(bitmap_info->bitmap);
+ kfree(bitmap_info);
+ block_group->total_bitmaps--;
+ recalculate_thresholds(block_group);
+ }
+
+ /*
+ * no entry after this bitmap, but we still have bytes to
+ * remove, so something has gone wrong.
+ */
+ if (!next)
+ return -EINVAL;
+
+ bitmap_info = rb_entry(next, struct btrfs_free_space,
+ offset_index);
+
+ /*
+ * if the next entry isn't a bitmap we need to return to let the
+ * extent stuff do its work.
+ */
+ if (!bitmap_info->bitmap)
+ return -EAGAIN;
+
+ /*
+ * Ok the next item is a bitmap, but it may not actually hold
+ * the information for the rest of this free space stuff, so
+ * look for it, and if we don't find it return so we can try
+ * everything over again.
+ */
+ search_start = *offset;
+ search_bytes = *bytes;
+ ret = search_bitmap(block_group, bitmap_info, &search_start,
+ &search_bytes);
+ if (ret < 0 || search_start != *offset)
+ return -EAGAIN;
+
+ goto again;
+ } else if (!bitmap_info->bytes) {
+ unlink_free_space(block_group, bitmap_info);
+ kfree(bitmap_info->bitmap);
+ kfree(bitmap_info);
+ block_group->total_bitmaps--;
+ recalculate_thresholds(block_group);
+ }
+
+ return 0;
+}
+
+static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_space *info)
+{
+ struct btrfs_free_space *bitmap_info;
+ int added = 0;
+ u64 bytes, offset, end;
+ int ret;
+
+ /*
+ * If we are below the extents threshold then we can add this as an
+ * extent, and don't have to deal with the bitmap
+ */
+ if (block_group->free_extents < block_group->extents_thresh &&
+ info->bytes > block_group->sectorsize * 4)
+ return 0;
+
+ /*
+ * some block groups are so tiny they can't be enveloped by a bitmap, so
+ * don't even bother to create a bitmap for this
+ */
+ if (BITS_PER_BITMAP * block_group->sectorsize >
+ block_group->key.offset)
+ return 0;
+
+ bytes = info->bytes;
+ offset = info->offset;
+
+again:
+ bitmap_info = tree_search_offset(block_group,
+ offset_to_bitmap(block_group, offset),
+ 1, 0);
+ if (!bitmap_info) {
+ BUG_ON(added);
+ goto new_bitmap;
+ }
+
+ end = bitmap_info->offset +
+ (u64)(BITS_PER_BITMAP * block_group->sectorsize);
+
+ if (offset >= bitmap_info->offset && offset + bytes > end) {
+ bitmap_set_bits(block_group, bitmap_info, offset,
+ end - offset);
+ bytes -= end - offset;
+ offset = end;
+ added = 0;
+ } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
+ bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+ bytes = 0;
+ } else {
+ BUG();
+ }
+
+ if (!bytes) {
+ ret = 1;
+ goto out;
+ } else
+ goto again;
+
+new_bitmap:
+ if (info && info->bitmap) {
+ add_new_bitmap(block_group, info, offset);
+ added = 1;
+ info = NULL;
+ goto again;
+ } else {
+ spin_unlock(&block_group->tree_lock);
+
+ /* no pre-allocated info, allocate a new one */
+ if (!info) {
+ info = kzalloc(sizeof(struct btrfs_free_space),
+ GFP_NOFS);
+ if (!info) {
+ spin_lock(&block_group->tree_lock);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* allocate the bitmap */
+ info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+ spin_lock(&block_group->tree_lock);
+ if (!info->bitmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ goto again;
+ }
+
+out:
+ if (info) {
+ if (info->bitmap)
+ kfree(info->bitmap);
+ kfree(info);
+ }
return ret;
}
@@ -208,8 +600,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes)
{
- struct btrfs_free_space *right_info;
- struct btrfs_free_space *left_info;
+ struct btrfs_free_space *right_info = NULL;
+ struct btrfs_free_space *left_info = NULL;
struct btrfs_free_space *info = NULL;
int ret = 0;
@@ -227,18 +619,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
* are adding, if there is remove that struct and add a new one to
* cover the entire range
*/
- right_info = tree_search_offset(&block_group->free_space_offset,
- offset+bytes, 0, 0);
- left_info = tree_search_offset(&block_group->free_space_offset,
- offset-1, 0, 1);
+ right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+ if (right_info && rb_prev(&right_info->offset_index))
+ left_info = rb_entry(rb_prev(&right_info->offset_index),
+ struct btrfs_free_space, offset_index);
+ else
+ left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+
+ /*
+ * If there was no extent directly to the left or right of this new
+ * extent then we know we're going to have to allocate a new extent, so
+ * before we do that see if we need to drop this into a bitmap
+ */
+ if ((!left_info || left_info->bitmap) &&
+ (!right_info || right_info->bitmap)) {
+ ret = insert_into_bitmap(block_group, info);
+
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ ret = 0;
+ goto out;
+ }
+ }
- if (right_info) {
+ if (right_info && !right_info->bitmap) {
unlink_free_space(block_group, right_info);
info->bytes += right_info->bytes;
kfree(right_info);
}
- if (left_info && left_info->offset + left_info->bytes == offset) {
+ if (left_info && !left_info->bitmap &&
+ left_info->offset + left_info->bytes == offset) {
unlink_free_space(block_group, left_info);
info->offset = left_info->offset;
info->bytes += left_info->bytes;
@@ -248,11 +660,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
ret = link_free_space(block_group, info);
if (ret)
kfree(info);
-
+out:
spin_unlock(&block_group->tree_lock);
if (ret) {
- printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
+ printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
BUG_ON(ret == -EEXIST);
}
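
The merge rules above only coalesce plain extent entries; a sketch of the behavior (offsets hypothetical):

	/*
	 * existing free extents: [0, 4K) and [8K, 12K)
	 * btrfs_add_free_space(bg, 4K, 4K)
	 *   -> right_info = [8K, 12K), left_info = [0, 4K)
	 *   -> both unlinked and merged into a single extent [0, 12K)
	 *
	 * bitmap neighbors are never merged; when both neighbors are bitmaps
	 * (or absent), the new space is handed to insert_into_bitmap() first.
	 */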
@@ -263,40 +675,74 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes)
{
struct btrfs_free_space *info;
+ struct btrfs_free_space *next_info = NULL;
int ret = 0;
spin_lock(&block_group->tree_lock);
- info = tree_search_offset(&block_group->free_space_offset, offset, 0,
- 1);
- if (info && info->offset == offset) {
- if (info->bytes < bytes) {
- printk(KERN_ERR "Found free space at %llu, size %llu,"
- "trying to use %llu\n",
- (unsigned long long)info->offset,
- (unsigned long long)info->bytes,
- (unsigned long long)bytes);
+again:
+ info = tree_search_offset(block_group, offset, 0, 0);
+ if (!info) {
+ /*
+ * oops didn't find an extent that matched the space we wanted
+ * to remove, look for a bitmap instead
+ */
+ info = tree_search_offset(block_group,
+ offset_to_bitmap(block_group, offset),
+ 1, 0);
+ if (!info) {
+ WARN_ON(1);
+ goto out_lock;
+ }
+ }
+
+ if (info->bytes < bytes && rb_next(&info->offset_index)) {
+ u64 end;
+ next_info = rb_entry(rb_next(&info->offset_index),
+ struct btrfs_free_space,
+ offset_index);
+
+ if (next_info->bitmap)
+ end = next_info->offset + BITS_PER_BITMAP *
+ block_group->sectorsize - 1;
+ else
+ end = next_info->offset + next_info->bytes;
+
+ if (next_info->bytes < bytes ||
+ next_info->offset > offset || offset > end) {
+ printk(KERN_CRIT "Found free space at %llu, size %llu,"
+ " trying to use %llu\n",
+ (unsigned long long)info->offset,
+ (unsigned long long)info->bytes,
+ (unsigned long long)bytes);
WARN_ON(1);
ret = -EINVAL;
- spin_unlock(&block_group->tree_lock);
- goto out;
+ goto out_lock;
}
- unlink_free_space(block_group, info);
- if (info->bytes == bytes) {
- kfree(info);
- spin_unlock(&block_group->tree_lock);
- goto out;
+ info = next_info;
+ }
+
+ if (info->bytes == bytes) {
+ unlink_free_space(block_group, info);
+ if (info->bitmap) {
+ kfree(info->bitmap);
+ block_group->total_bitmaps--;
}
+ kfree(info);
+ goto out_lock;
+ }
+ if (!info->bitmap && info->offset == offset) {
+ unlink_free_space(block_group, info);
info->offset += bytes;
info->bytes -= bytes;
+ link_free_space(block_group, info);
+ goto out_lock;
+ }
- ret = link_free_space(block_group, info);
- spin_unlock(&block_group->tree_lock);
- BUG_ON(ret);
- } else if (info && info->offset < offset &&
- info->offset + info->bytes >= offset + bytes) {
+ if (!info->bitmap && info->offset <= offset &&
+ info->offset + info->bytes >= offset + bytes) {
u64 old_start = info->offset;
/*
* we're freeing space in the middle of the info,
@@ -312,7 +758,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
info->offset = offset + bytes;
info->bytes = old_end - info->offset;
ret = link_free_space(block_group, info);
- BUG_ON(ret);
+ WARN_ON(ret);
+ if (ret)
+ goto out_lock;
} else {
/* the hole we're creating ends at the end
* of the info struct, just free the info
@@ -320,32 +768,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
kfree(info);
}
spin_unlock(&block_group->tree_lock);
- /* step two, insert a new info struct to cover anything
- * before the hole
+
+ /* step two, insert a new info struct to cover
+ * anything before the hole
*/
ret = btrfs_add_free_space(block_group, old_start,
offset - old_start);
- BUG_ON(ret);
- } else {
- spin_unlock(&block_group->tree_lock);
- if (!info) {
- printk(KERN_ERR "couldn't find space %llu to free\n",
- (unsigned long long)offset);
- printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
- block_group->cached,
- (unsigned long long)block_group->key.objectid,
- (unsigned long long)block_group->key.offset);
- btrfs_dump_free_space(block_group, bytes);
- } else if (info) {
- printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
- "but wanted offset=%llu bytes=%llu\n",
- (unsigned long long)info->offset,
- (unsigned long long)info->bytes,
- (unsigned long long)offset,
- (unsigned long long)bytes);
- }
- WARN_ON(1);
+ WARN_ON(ret);
+ goto out;
}
+
+ ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+ if (ret == -EAGAIN)
+ goto again;
+ BUG_ON(ret);
+out_lock:
+ spin_unlock(&block_group->tree_lock);
out:
return ret;
}
@@ -361,10 +799,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes)
count++;
- printk(KERN_ERR "entry offset %llu, bytes %llu\n",
+ printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
(unsigned long long)info->offset,
- (unsigned long long)info->bytes);
+ (unsigned long long)info->bytes,
+ (info->bitmap) ? "yes" : "no");
}
+ printk(KERN_INFO "block group has cluster?: %s\n",
+ list_empty(&block_group->cluster_list) ? "no" : "yes");
printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
"\n", count);
}
@@ -397,26 +838,35 @@ __btrfs_return_cluster_to_free_space(
{
struct btrfs_free_space *entry;
struct rb_node *node;
+ bool bitmap;
spin_lock(&cluster->lock);
if (cluster->block_group != block_group)
goto out;
+ bitmap = cluster->points_to_bitmap;
+ cluster->block_group = NULL;
cluster->window_start = 0;
+ list_del_init(&cluster->block_group_list);
+ cluster->points_to_bitmap = false;
+
+ if (bitmap)
+ goto out;
+
node = rb_first(&cluster->root);
- while(node) {
+ while (node) {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
node = rb_next(&entry->offset_index);
rb_erase(&entry->offset_index, &cluster->root);
- link_free_space(block_group, entry);
+ BUG_ON(entry->bitmap);
+ tree_insert_offset(&block_group->free_space_offset,
+ entry->offset, &entry->offset_index, 0);
}
- list_del_init(&cluster->block_group_list);
-
- btrfs_put_block_group(cluster->block_group);
- cluster->block_group = NULL;
cluster->root.rb_node = NULL;
+
out:
spin_unlock(&cluster->lock);
+ btrfs_put_block_group(block_group);
return 0;
}
@@ -425,20 +875,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
struct btrfs_free_space *info;
struct rb_node *node;
struct btrfs_free_cluster *cluster;
- struct btrfs_free_cluster *safe;
+ struct list_head *head;
spin_lock(&block_group->tree_lock);
-
- list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
- block_group_list) {
+ while ((head = block_group->cluster_list.next) !=
+ &block_group->cluster_list) {
+ cluster = list_entry(head, struct btrfs_free_cluster,
+ block_group_list);
WARN_ON(cluster->block_group != block_group);
__btrfs_return_cluster_to_free_space(block_group, cluster);
+ if (need_resched()) {
+ spin_unlock(&block_group->tree_lock);
+ cond_resched();
+ spin_lock(&block_group->tree_lock);
+ }
}
- while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
- info = rb_entry(node, struct btrfs_free_space, bytes_index);
+ while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
+ info = rb_entry(node, struct btrfs_free_space, offset_index);
unlink_free_space(block_group, info);
+ if (info->bitmap)
+ kfree(info->bitmap);
kfree(info);
if (need_resched()) {
spin_unlock(&block_group->tree_lock);
@@ -446,6 +904,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
spin_lock(&block_group->tree_lock);
}
}
+
spin_unlock(&block_group->tree_lock);
}
@@ -453,25 +912,35 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes, u64 empty_size)
{
struct btrfs_free_space *entry = NULL;
+ u64 bytes_search = bytes + empty_size;
u64 ret = 0;
spin_lock(&block_group->tree_lock);
- entry = tree_search_offset(&block_group->free_space_offset, offset,
- bytes + empty_size, 1);
+ entry = find_free_space(block_group, &offset, &bytes_search, 0);
if (!entry)
- entry = tree_search_bytes(&block_group->free_space_bytes,
- offset, bytes + empty_size);
- if (entry) {
+ goto out;
+
+ ret = offset;
+ if (entry->bitmap) {
+ bitmap_clear_bits(block_group, entry, offset, bytes);
+ if (!entry->bytes) {
+ unlink_free_space(block_group, entry);
+ kfree(entry->bitmap);
+ kfree(entry);
+ block_group->total_bitmaps--;
+ recalculate_thresholds(block_group);
+ }
+ } else {
unlink_free_space(block_group, entry);
- ret = entry->offset;
entry->offset += bytes;
entry->bytes -= bytes;
-
if (!entry->bytes)
kfree(entry);
else
link_free_space(block_group, entry);
}
+
+out:
spin_unlock(&block_group->tree_lock);
return ret;
@@ -517,6 +986,54 @@ int btrfs_return_cluster_to_free_space(
return ret;
}
+static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ u64 bytes, u64 min_start)
+{
+ struct btrfs_free_space *entry;
+ int err;
+ u64 search_start = cluster->window_start;
+ u64 search_bytes = bytes;
+ u64 ret = 0;
+
+ spin_lock(&block_group->tree_lock);
+ spin_lock(&cluster->lock);
+
+ if (!cluster->points_to_bitmap)
+ goto out;
+
+ if (cluster->block_group != block_group)
+ goto out;
+
+ /*
+ * search_start is the beginning of the bitmap, but at some point it may
+ * be a good idea to point to the actual start of the free area in the
+ * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
+ * to 1 to make sure we get the bitmap entry
+ */
+ entry = tree_search_offset(block_group,
+ offset_to_bitmap(block_group, search_start),
+ 1, 0);
+ if (!entry || !entry->bitmap)
+ goto out;
+
+ search_start = min_start;
+ search_bytes = bytes;
+
+ err = search_bitmap(block_group, entry, &search_start,
+ &search_bytes);
+ if (err)
+ goto out;
+
+ ret = search_start;
+ bitmap_clear_bits(block_group, entry, ret, bytes);
+out:
+ spin_unlock(&cluster->lock);
+ spin_unlock(&block_group->tree_lock);
+
+ return ret;
+}
+
/*
* given a cluster, try to allocate 'bytes' from it, returns 0
* if it couldn't find anything suitably large, or a logical disk offset
@@ -530,6 +1047,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
struct rb_node *node;
u64 ret = 0;
+ if (cluster->points_to_bitmap)
+ return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
+ min_start);
+
spin_lock(&cluster->lock);
if (bytes > cluster->max_size)
goto out;
@@ -567,9 +1088,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
}
out:
spin_unlock(&cluster->lock);
+
return ret;
}
+static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_space *entry,
+ struct btrfs_free_cluster *cluster,
+ u64 offset, u64 bytes, u64 min_bytes)
+{
+ unsigned long next_zero;
+ unsigned long i;
+ unsigned long search_bits;
+ unsigned long total_bits;
+ unsigned long found_bits;
+ unsigned long start = 0;
+ unsigned long total_found = 0;
+ bool found = false;
+
+ i = offset_to_bit(entry->offset, block_group->sectorsize,
+ max_t(u64, offset, entry->offset));
+ search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
+ total_bits = bytes_to_bits(bytes, block_group->sectorsize);
+
+again:
+ found_bits = 0;
+ for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
+ i < BITS_PER_BITMAP;
+ i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
+ next_zero = find_next_zero_bit(entry->bitmap,
+ BITS_PER_BITMAP, i);
+ if (next_zero - i >= search_bits) {
+ found_bits = next_zero - i;
+ break;
+ }
+ i = next_zero;
+ }
+
+ if (!found_bits)
+ return -1;
+
+ if (!found) {
+ start = i;
+ found = true;
+ }
+
+ total_found += found_bits;
+
+ if (cluster->max_size < found_bits * block_group->sectorsize)
+ cluster->max_size = found_bits * block_group->sectorsize;
+
+ if (total_found < total_bits) {
+ i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
+ if (i - start > total_bits * 2) {
+ total_found = 0;
+ cluster->max_size = 0;
+ found = false;
+ }
+ goto again;
+ }
+
+ cluster->window_start = start * block_group->sectorsize +
+ entry->offset;
+ cluster->points_to_bitmap = true;
+
+ return 0;
+}
+
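For reference, the bit/byte conversions btrfs_bitmap_cluster() leans on. The names offset_to_bit() and bytes_to_bits() are taken from the calls above; the bodies below are a sketch of the obvious implementation, not quoted from the patch:

	static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
						  u64 offset)
	{
		BUG_ON(offset < bitmap_start);
		/* plain / shown for clarity; a 32-bit kernel build would need div64_u64() */
		return (unsigned long)((offset - bitmap_start) / sectorsize);
	}

	static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
	{
		/* round up so a partial sector still occupies a bit */
		return (unsigned long)((bytes + sectorsize - 1) / sectorsize);
	}

With a 4K sectorsize, offset_to_bit(win, 4096, win + 8192) == 2 and bytes_to_bits(4097, 4096) == 2.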
/*
* here we try to find a cluster of blocks in a block group. The goal
* is to find at least bytes free and up to empty_size + bytes free.
@@ -587,12 +1172,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
struct btrfs_free_space *entry = NULL;
struct rb_node *node;
struct btrfs_free_space *next;
- struct btrfs_free_space *last;
+ struct btrfs_free_space *last = NULL;
u64 min_bytes;
u64 window_start;
u64 window_free;
u64 max_extent = 0;
- int total_retries = 0;
+ bool found_bitmap = false;
int ret;
/* for metadata, allow allocates with more holes */
@@ -620,31 +1205,80 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
goto out;
}
again:
- min_bytes = min(min_bytes, bytes + empty_size);
- entry = tree_search_bytes(&block_group->free_space_bytes,
- offset, min_bytes);
+ entry = tree_search_offset(block_group, offset, found_bitmap, 1);
if (!entry) {
ret = -ENOSPC;
goto out;
}
+
+ /*
+ * If found_bitmap is true, we exhausted our search for extent entries,
+ * and we just want to search all of the bitmaps that we can find, and
+ * ignore any extent entries we find.
+ */
+ while (entry->bitmap || found_bitmap ||
+ (!entry->bitmap && entry->bytes < min_bytes)) {
+ struct rb_node *node = rb_next(&entry->offset_index);
+
+ if (entry->bitmap && entry->bytes > bytes + empty_size) {
+ ret = btrfs_bitmap_cluster(block_group, entry, cluster,
+ offset, bytes + empty_size,
+ min_bytes);
+ if (!ret)
+ goto got_it;
+ }
+
+ if (!node) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ }
+
+ /*
+ * We already searched all the extent entries from the passed in offset
+ * to the end and didn't find enough space for the cluster, and we also
+ * didn't find any bitmaps that met our criteria, just go ahead and exit
+ */
+ if (found_bitmap) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ cluster->points_to_bitmap = false;
window_start = entry->offset;
window_free = entry->bytes;
last = entry;
max_extent = entry->bytes;
- while(1) {
+ while (1) {
/* our window is just right, let's fill it */
if (window_free >= bytes + empty_size)
break;
node = rb_next(&last->offset_index);
if (!node) {
+ if (found_bitmap)
+ goto again;
ret = -ENOSPC;
goto out;
}
next = rb_entry(node, struct btrfs_free_space, offset_index);
/*
+ * we found a bitmap, so if this search doesn't result in a
+ * cluster, we know to go and search again for the bitmaps and
+ * start looking for space there
+ */
+ if (next->bitmap) {
+ if (!found_bitmap)
+ offset = next->offset;
+ found_bitmap = true;
+ last = next;
+ continue;
+ }
+
+ /*
* we haven't filled the empty size and the window is
* very large. reset and try again
*/
@@ -655,19 +1289,6 @@ again:
window_free = entry->bytes;
last = entry;
max_extent = 0;
- total_retries++;
- if (total_retries % 64 == 0) {
- if (min_bytes >= (bytes + empty_size)) {
- ret = -ENOSPC;
- goto out;
- }
- /*
- * grow our allocation a bit, we're not having
- * much luck
- */
- min_bytes *= 2;
- goto again;
- }
} else {
last = next;
window_free += next->bytes;
@@ -685,11 +1306,19 @@ again:
* The cluster includes an rbtree, but only uses the offset index
* of each free space cache entry.
*/
- while(1) {
+ while (1) {
node = rb_next(&entry->offset_index);
- unlink_free_space(block_group, entry);
+ if (entry->bitmap && node) {
+ entry = rb_entry(node, struct btrfs_free_space,
+ offset_index);
+ continue;
+ } else if (entry->bitmap && !node) {
+ break;
+ }
+
+ rb_erase(&entry->offset_index, &block_group->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
- &entry->offset_index);
+ &entry->offset_index, 0);
BUG_ON(ret);
if (!node || entry == last)
@@ -697,8 +1326,10 @@ again:
entry = rb_entry(node, struct btrfs_free_space, offset_index);
}
- ret = 0;
+
cluster->max_size = max_extent;
+got_it:
+ ret = 0;
atomic_inc(&block_group->count);
list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
cluster->block_group = block_group;
@@ -718,6 +1349,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
spin_lock_init(&cluster->refill_lock);
cluster->root.rb_node = NULL;
cluster->max_size = 0;
+ cluster->points_to_bitmap = false;
INIT_LIST_HEAD(&cluster->block_group_list);
cluster->block_group = NULL;
}
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 266fb87..890a8e7 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -19,6 +19,14 @@
#ifndef __BTRFS_FREE_SPACE_CACHE
#define __BTRFS_FREE_SPACE_CACHE
+struct btrfs_free_space {
+ struct rb_node offset_index;
+ u64 offset;
+ u64 bytes;
+ unsigned long *bitmap;
+ struct list_head list;
+};
+
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
u64 bytenr, u64 size);
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
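The struct now does double duty: when ->bitmap is NULL the entry describes one contiguous free extent of ->bytes starting at ->offset; when ->bitmap is set, ->offset is the start of a fixed-size window, the bitmap marks which sectors inside that window are free, and ->bytes counts the total free bytes tracked. A hypothetical helper, purely illustrative (BITS_PER_BITMAP is the constant used in free-space-cache.c):

	/* end of the region an entry can possibly describe - illustrative only */
	static u64 entry_window_end(const struct btrfs_free_space *e, u64 sectorsize)
	{
		if (e->bitmap)
			return e->offset + (u64)BITS_PER_BITMAP * sectorsize;
		return e->offset + e->bytes;
	}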
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dbe1aab..272b9b2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -26,7 +26,6 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
@@ -2604,8 +2603,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
if (root->ref_cows)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
path = btrfs_alloc_path();
- path->reada = -1;
BUG_ON(!path);
+ path->reada = -1;
/* FIXME, add redo link to tree so we don't leak on crash */
key.objectid = inode->i_ino;
@@ -3580,12 +3579,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
owner = 1;
BTRFS_I(inode)->block_group =
btrfs_find_block_group(root, 0, alloc_hint, owner);
- if ((mode & S_IFREG)) {
- if (btrfs_test_opt(root, NODATASUM))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
- }
key[0].objectid = objectid;
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -3640,6 +3633,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_inherit_iflags(inode, dir);
+ if ((mode & S_IFREG)) {
+ if (btrfs_test_opt(root, NODATASUM))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
+ if (btrfs_test_opt(root, NODATACOW))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+ }
+
insert_inode_hash(inode);
inode_tree_add(inode);
return inode;
@@ -4785,8 +4785,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* and the replacement file is large. Start IO on it now so
* we don't add too much work to the end of the transaction
*/
- if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
- new_inode->i_size &&
+ if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(old_inode->i_mapping);
@@ -5082,6 +5081,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
struct extent_map *em;
struct btrfs_trans_handle *trans;
+ struct btrfs_root *root;
int ret;
alloc_start = offset & ~mask;
@@ -5100,6 +5100,13 @@ static long btrfs_fallocate(struct inode *inode, int mode,
goto out;
}
+ root = BTRFS_I(inode)->root;
+
+ ret = btrfs_check_data_free_space(root, inode,
+ alloc_end - alloc_start);
+ if (ret)
+ goto out;
+
locked_end = alloc_end - 1;
while (1) {
struct btrfs_ordered_extent *ordered;
@@ -5107,7 +5114,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
if (!trans) {
ret = -EIO;
- goto out;
+ goto out_free;
}
/* the extent lock is ordered inside the running
@@ -5168,6 +5175,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
GFP_NOFS);
btrfs_end_transaction(trans, BTRFS_I(inode)->root);
+out_free:
+ btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
out:
mutex_unlock(&inode->i_mutex);
return ret;
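The ENOSPC handling added above follows a reserve-then-undo pattern: space is reserved up front, and every exit path taken after the reservation, success or failure, releases it again. A condensed sketch (do_prealloc_work() is hypothetical; the two btrfs_* calls are the ones named in the diff):

	static long fallocate_sketch(struct btrfs_root *root, struct inode *inode,
				     u64 alloc_start, u64 alloc_end)
	{
		int ret;

		ret = btrfs_check_data_free_space(root, inode,
						  alloc_end - alloc_start);
		if (ret)
			return ret;		/* nothing reserved yet, nothing to undo */

		ret = do_prealloc_work(inode, alloc_start, alloc_end);	/* hypothetical */

		/* success or failure, the transient reservation is dropped */
		btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
		return ret;
	}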
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index eff18f5..bd88f25 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -27,7 +27,6 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
@@ -1028,7 +1027,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
struct btrfs_file_extent_item);
comp = btrfs_file_extent_compression(leaf, extent);
type = btrfs_file_extent_type(leaf, extent);
- if (type == BTRFS_FILE_EXTENT_REG) {
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
disko = btrfs_file_extent_disk_bytenr(leaf,
extent);
diskl = btrfs_file_extent_disk_num_bytes(leaf,
@@ -1051,7 +1051,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
new_key.objectid = inode->i_ino;
new_key.offset = key.offset + destoff - off;
- if (type == BTRFS_FILE_EXTENT_REG) {
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
if (ret)
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 6d6523d..0d126be 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
}
printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
(unsigned long long)btrfs_header_bytenr(c),
- btrfs_header_level(c), nr,
+ level, nr,
(u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
@@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
btrfs_level_size(root, level - 1),
btrfs_node_ptr_generation(c, i));
if (btrfs_is_leaf(next) &&
- btrfs_header_level(c) != 1)
+ level != 1)
BUG();
if (btrfs_header_level(next) !=
- btrfs_header_level(c) - 1)
+ level - 1)
BUG();
btrfs_print_tree(root, next);
free_extent_buffer(next);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b23dc20..c04f7f2 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -670,6 +670,8 @@ again:
err = ret;
goto out;
}
+ if (ret > 0 && path2->slots[level] > 0)
+ path2->slots[level]--;
eb = path2->nodes[level];
WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
@@ -1609,6 +1611,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
BUG_ON(level == 0);
path->lowest_level = level;
ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
+ path->lowest_level = 0;
if (ret < 0) {
btrfs_free_path(path);
return ret;
@@ -1788,7 +1791,7 @@ static void merge_func(struct btrfs_work *work)
btrfs_end_transaction(trans, root);
}
- btrfs_drop_dead_root(reloc_root);
+ btrfs_drop_snapshot(reloc_root, 0);
if (atomic_dec_and_test(async->num_pending))
complete(async->done);
@@ -2075,9 +2078,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
BUG_ON(ret);
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
}
if (!lowest) {
btrfs_tree_unlock(upper->eb);
@@ -2553,8 +2553,13 @@ int relocate_inode_pages(struct inode *inode, u64 start, u64 len)
last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
/* make sure the dirty trick played by the caller work */
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- first_index, last_index);
+ while (1) {
+ ret = invalidate_inode_pages2_range(inode->i_mapping,
+ first_index, last_index);
+ if (ret != -EBUSY)
+ break;
+ schedule_timeout(HZ/10);
+ }
if (ret)
goto out_unlock;
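One caveat with the retry loop: schedule_timeout() only sleeps if the task state was set to something other than TASK_RUNNING beforehand, so as written the loop yields the CPU but does not actually delay. A variant that really backs off would look like:

	while (1) {
		ret = invalidate_inode_pages2_range(inode->i_mapping,
						    first_index, last_index);
		if (ret != -EBUSY)
			break;
		schedule_timeout_uninterruptible(HZ / 10);	/* sleeps ~100ms */
	}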
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9f179d4..6d6d06c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -26,7 +26,6 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/string.h>
-#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4e83457..cdbb502 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -40,6 +40,12 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
}
}
+static noinline void switch_commit_root(struct btrfs_root *root)
+{
+ free_extent_buffer(root->commit_root);
+ root->commit_root = btrfs_root_node(root);
+}
+
/*
* either allocate a new transaction or hop into the existing one
*/
@@ -444,9 +450,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
btrfs_write_dirty_block_groups(trans, root);
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
- BUG_ON(ret);
-
while (1) {
old_root_bytenr = btrfs_root_bytenr(&root->root_item);
if (old_root_bytenr == root->node->start)
@@ -457,13 +460,14 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
&root->root_key,
&root->root_item);
BUG_ON(ret);
- btrfs_write_dirty_block_groups(trans, root);
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_write_dirty_block_groups(trans, root);
BUG_ON(ret);
}
- free_extent_buffer(root->commit_root);
- root->commit_root = btrfs_root_node(root);
+
+ if (root != root->fs_info->extent_root)
+ switch_commit_root(root);
+
return 0;
}
@@ -495,10 +499,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
root = list_entry(next, struct btrfs_root, dirty_list);
update_cowonly_root(trans, root);
-
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
- BUG_ON(ret);
}
+
+ down_write(&fs_info->extent_commit_sem);
+ switch_commit_root(fs_info->extent_root);
+ up_write(&fs_info->extent_commit_sem);
+
return 0;
}
@@ -544,8 +550,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
btrfs_update_reloc_root(trans, root);
if (root->commit_root != root->node) {
- free_extent_buffer(root->commit_root);
- root->commit_root = btrfs_root_node(root);
+ switch_commit_root(root);
btrfs_set_root_node(&root->root_item,
root->node);
}
@@ -593,6 +598,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
return 0;
}
+#if 0
/*
* when dropping snapshots, we generate a ton of delayed refs, and it makes
* sense not to join the transaction while it is trying to flush the current
@@ -681,6 +687,7 @@ int btrfs_drop_dead_root(struct btrfs_root *root)
btrfs_btree_balance_dirty(tree_root, nr);
return ret;
}
+#endif
/*
* new snapshots need to be created at a very specific time in the
@@ -850,6 +857,16 @@ static void update_super_roots(struct btrfs_root *root)
super->root_level = root_item->level;
}
+int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
+{
+ int ret = 0;
+ spin_lock(&info->new_trans_lock);
+ if (info->running_transaction)
+ ret = info->running_transaction->in_commit;
+ spin_unlock(&info->new_trans_lock);
+ return ret;
+}
+
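A hypothetical caller of the new helper, deciding whether to pile more work onto a running commit. Note the function only samples in_commit under new_trans_lock, so the answer can be stale by the time it is acted on:

	if (btrfs_transaction_in_commit(fs_info))
		return;		/* a commit is in progress; back off */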
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
@@ -941,9 +958,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_unlock(&root->fs_info->trans_mutex);
- if (flush_on_commit || snap_pending) {
- if (flush_on_commit)
- btrfs_start_delalloc_inodes(root);
+ if (flush_on_commit) {
+ btrfs_start_delalloc_inodes(root);
+ ret = btrfs_wait_ordered_extents(root, 0);
+ BUG_ON(ret);
+ } else if (snap_pending) {
ret = btrfs_wait_ordered_extents(root, 1);
BUG_ON(ret);
}
@@ -1007,15 +1026,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_set_root_node(&root->fs_info->tree_root->root_item,
root->fs_info->tree_root->node);
- free_extent_buffer(root->fs_info->tree_root->commit_root);
- root->fs_info->tree_root->commit_root =
- btrfs_root_node(root->fs_info->tree_root);
+ switch_commit_root(root->fs_info->tree_root);
btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
root->fs_info->chunk_root->node);
- free_extent_buffer(root->fs_info->chunk_root->commit_root);
- root->fs_info->chunk_root->commit_root =
- btrfs_root_node(root->fs_info->chunk_root);
+ switch_commit_root(root->fs_info->chunk_root);
update_super_roots(root);
@@ -1055,6 +1070,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
cur_trans->commit_done = 1;
root->fs_info->last_trans_committed = cur_trans->transid;
+
wake_up(&cur_trans->commit_wait);
put_transaction(cur_trans);
@@ -1081,7 +1097,7 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
while (!list_empty(&list)) {
root = list_entry(list.next, struct btrfs_root, root_list);
list_del_init(&root->root_list);
- btrfs_drop_dead_root(root);
+ btrfs_drop_snapshot(root, 0);
}
return 0;
}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 961c3ee..663c674 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -107,4 +107,5 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages);
+int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c139222..d91b0de 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
return -ENOENT;
inode = read_one_inode(root, key->objectid);
- BUG_ON(!dir);
+ BUG_ON(!inode);
ref_ptr = btrfs_item_ptr_offset(eb, slot);
ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3ab80e9..5dbefd1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -721,7 +721,8 @@ error:
*/
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
- u64 num_bytes, u64 *start)
+ u64 num_bytes, u64 *start,
+ u64 *max_avail)
{
struct btrfs_key key;
struct btrfs_root *root = device->dev_root;
@@ -758,9 +759,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
if (ret < 0)
goto error;
- ret = btrfs_previous_item(root, path, 0, key.type);
- if (ret < 0)
- goto error;
+ if (ret > 0) {
+ ret = btrfs_previous_item(root, path, key.objectid, key.type);
+ if (ret < 0)
+ goto error;
+ if (ret > 0)
+ start_found = 1;
+ }
l = path->nodes[0];
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
while (1) {
@@ -803,6 +808,10 @@ no_more_items:
if (last_byte < search_start)
last_byte = search_start;
hole_size = key.offset - last_byte;
+
+ if (hole_size > *max_avail)
+ *max_avail = hole_size;
+
if (key.offset > last_byte &&
hole_size >= num_bytes) {
*start = last_byte;
@@ -1621,6 +1630,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
device->fs_devices->total_rw_bytes += diff;
device->total_bytes = new_size;
+ device->disk_total_bytes = new_size;
btrfs_clear_space_info_full(device->dev_root->fs_info);
return btrfs_update_device(trans, device);
@@ -2007,7 +2017,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
goto done;
if (ret) {
ret = 0;
- goto done;
+ break;
}
l = path->nodes[0];
@@ -2015,7 +2025,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
if (key.objectid != device->devid)
- goto done;
+ break;
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
length = btrfs_dev_extent_length(l, dev_extent);
@@ -2171,6 +2181,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
max_chunk_size);
again:
+ max_avail = 0;
if (!map || map->num_stripes != num_stripes) {
kfree(map);
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -2219,7 +2230,8 @@ again:
if (device->in_fs_metadata && avail >= min_free) {
ret = find_free_dev_extent(trans, device,
- min_free, &dev_offset);
+ min_free, &dev_offset,
+ &max_avail);
if (ret == 0) {
list_move_tail(&device->dev_alloc_list,
&private_devs);
@@ -2795,26 +2807,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
}
}
- for (i = 0; i > nr; i++) {
- struct btrfs_multi_bio *multi;
- struct btrfs_bio_stripe *stripe;
- int ret;
-
- length = 1;
- ret = btrfs_map_block(map_tree, WRITE, buf[i],
- &length, &multi, 0);
- BUG_ON(ret);
-
- stripe = multi->stripes;
- for (j = 0; j < multi->num_stripes; j++) {
- if (stripe->physical >= physical &&
- physical < stripe->physical + length)
- break;
- }
- BUG_ON(j >= multi->num_stripes);
- kfree(multi);
- }
-
*logical = buf;
*naddrs = nr;
*stripe_len = map->stripe_len;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index ecfbce8..3e2b90e 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -208,7 +208,7 @@ int btrfs_zlib_compress_pages(struct address_space *mapping,
*total_in = 0;
workspace = find_zlib_workspace();
- if (!workspace)
+ if (IS_ERR(workspace))
return -1;
if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
@@ -366,7 +366,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
char *kaddr;
workspace = find_zlib_workspace();
- if (!workspace)
+ if (IS_ERR(workspace))
return -ENOMEM;
data_in = kmap(pages_in[page_in_index]);
@@ -547,7 +547,7 @@ int btrfs_zlib_decompress(unsigned char *data_in,
return -ENOMEM;
workspace = find_zlib_workspace();
- if (!workspace)
+ if (IS_ERR(workspace))
return -ENOMEM;
workspace->inf_strm.next_in = data_in;
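The three hunks above fix the same latent bug: find_zlib_workspace() reports failure with an ERR_PTR()-encoded pointer, so a plain NULL test never fires and the error pointer would be dereferenced later. The general pattern, sketched:

	workspace = find_zlib_workspace();
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);	/* would propagate the real errno;
						   the patch returns a fixed code instead */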
diff --git a/fs/char_dev.c b/fs/char_dev.c
index b7c9d51..a173551 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -13,7 +13,6 @@
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/kobject.h>
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index b486898..e85b1e4 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,10 @@
+Version 1.60
+-------------
+Fix memory leak in reconnect. Fix oops in DFS mount error path.
+Set s_maxbytes to a smaller value (the max that the vfs can handle)
+so that sendfile will now work over cifs mounts again. Add noforcegid
+and noforceuid mount parameters.
+
Version 1.59
------------
Client uses server inode numbers (which are persistent) rather than
@@ -5,7 +12,11 @@ client generated ones by default (mount option "serverino" turned
on by default if server supports it). Add forceuid and forcegid
mount options (so that when negotiating unix extensions specifying
which uid mounted does not immediately force the server's reported
-uids to be overridden).
+uids to be overridden). Add support for scope mount parm. Improve
+hard link detection to use the same inode for both. Do not set
+read-only dos attribute on directories (for chmod) since Windows
+explorer special cases this attribute bit for directories for
+a different purpose.
Version 1.58
------------
diff --git a/fs/cifs/README b/fs/cifs/README
index ad92921..79c1a93 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -262,11 +262,11 @@ A partial list of the supported mount options follows:
mount.
domain Set the SMB/CIFS workgroup name prepended to the
username during CIFS session establishment
- forceuid Set the default uid for inodes based on the uid
- passed in. For mounts to servers
+ forceuid Set the default uid for inodes to the uid
+ passed in on mount. For mounts to servers
which do support the CIFS Unix extensions, such as a
properly configured Samba server, the server provides
- the uid, gid and mode so this parameter should not be
+ the uid, gid and mode so this parameter should not be
specified unless the server's and client's uid and gid
numbering differ. If the server and client are in the
same domain (e.g. running winbind or nss_ldap) and
@@ -278,11 +278,7 @@ A partial list of the supported mount options follows:
of existing files will be the uid (gid) of the person
who executed the mount (root, except when mount.cifs
is configured setuid for user mounts) unless the "uid="
- (gid) mount option is specified. For the uid (gid) of newly
- created files and directories, ie files created since
- the last mount of the server share, the expected uid
- (gid) is cached as long as the inode remains in
- memory on the client. Also note that permission
+ (gid) mount option is specified. Also note that permission
checks (authorization checks) on accesses to a file occur
at the server, but there are cases in which an administrator
may want to restrict at the client as well. For those
@@ -290,12 +286,15 @@ A partial list of the supported mount options follows:
(such as Windows), permissions can also be checked at the
client, and a crude form of client side permission checking
can be enabled by specifying file_mode and dir_mode on
- the client. Note that the mount.cifs helper must be
- at version 1.10 or higher to support specifying the uid
- (or gid) in non-numeric form.
- forcegid (similar to above but for the groupid instead of uid)
+ the client. (default)
+ forcegid (similar to above but for the groupid instead of uid) (default)
+ noforceuid Fill in file owner information (uid) by requesting it from
+ the server if possible. With this option, the value given in
+ the uid= option (on mount) will only be used if the server
+ cannot support returning uids on inodes.
+ noforcegid (similar to above but for the group owner, gid, instead of uid)
uid Set the default uid for inodes, and indicate to the
- cifs kernel driver which local user mounted . If the server
+ cifs kernel driver which local user mounted. If the server
supports the unix extensions the default uid is
not used to fill in the owner fields of inodes (files)
unless the "forceuid" parameter is specified.
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 1b09f16..20692fb 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -49,6 +49,7 @@
#define ASN1_OJI 6 /* Object Identifier */
#define ASN1_OJD 7 /* Object Description */
#define ASN1_EXT 8 /* External */
+#define ASN1_ENUM 10 /* Enumerated */
#define ASN1_SEQ 16 /* Sequence */
#define ASN1_SET 17 /* Set */
#define ASN1_NUMSTR 18 /* Numerical String */
@@ -78,10 +79,12 @@
#define SPNEGO_OID_LEN 7
#define NTLMSSP_OID_LEN 10
#define KRB5_OID_LEN 7
+#define KRB5U2U_OID_LEN 8
#define MSKRB5_OID_LEN 7
static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 };
+static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };
static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 };
/*
@@ -122,6 +125,28 @@ asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
return 1;
}
+#if 0 /* will be needed later by spnego decoding/encoding of ntlmssp */
+static unsigned char
+asn1_enum_decode(struct asn1_ctx *ctx, __le32 *val)
+{
+ unsigned char ch;
+
+ if (ctx->pointer >= ctx->end) {
+ ctx->error = ASN1_ERR_DEC_EMPTY;
+ return 0;
+ }
+
+ ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */
+ if ((ch) == ASN1_ENUM) /* if ch value is ENUM, 0xa */
+ *val = *(++(ctx->pointer)); /* value has enum value */
+ else
+ return 0;
+
+ ctx->pointer++;
+ return 1;
+}
+#endif
+
static unsigned char
asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
{
@@ -476,10 +501,9 @@ decode_negTokenInit(unsigned char *security_blob, int length,
unsigned int cls, con, tag, oidlen, rc;
bool use_ntlmssp = false;
bool use_kerberos = false;
+ bool use_kerberosu2u = false;
bool use_mskerberos = false;
- *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default*/
-
/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
asn1_open(&ctx, security_blob, length);
@@ -515,6 +539,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* SPNEGO */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding negTokenInit"));
return 0;
@@ -526,6 +551,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* negTokenInit */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding negTokenInit"));
return 0;
@@ -537,6 +563,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding 2nd part of negTokenInit"));
return 0;
@@ -548,6 +575,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* sequence of */
if (asn1_header_decode
(&ctx, &sequence_end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding 2nd part of negTokenInit"));
@@ -560,6 +588,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* list of security mechanisms */
while (!asn1_eoc_decode(&ctx, sequence_end)) {
rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
if (!rc) {
@@ -576,11 +605,15 @@ decode_negTokenInit(unsigned char *security_blob, int length,
if (compare_oid(oid, oidlen, MSKRB5_OID,
MSKRB5_OID_LEN) &&
- !use_kerberos)
+ !use_mskerberos)
use_mskerberos = true;
+ else if (compare_oid(oid, oidlen, KRB5U2U_OID,
+ KRB5U2U_OID_LEN) &&
+ !use_kerberosu2u)
+ use_kerberosu2u = true;
else if (compare_oid(oid, oidlen, KRB5_OID,
KRB5_OID_LEN) &&
- !use_mskerberos)
+ !use_kerberos)
use_kerberos = true;
else if (compare_oid(oid, oidlen, NTLMSSP_OID,
NTLMSSP_OID_LEN))
@@ -593,7 +626,12 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
}
+ /* mechlistMIC */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+ /* Check if we have reached the end of the blob, but with
+ no mechListMic (e.g. NTLMSSP instead of KRB5) */
+ if (ctx.error == ASN1_ERR_DEC_EMPTY)
+ goto decode_negtoken_exit;
cFYI(1, ("Error decoding last part negTokenInit exit3"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
@@ -602,6 +640,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
cls, con, tag, end, *end));
return 0;
}
+
+ /* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit5"));
return 0;
@@ -611,6 +651,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
cls, con, tag, end, *end));
}
+ /* sequence of */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit 7"));
return 0;
@@ -619,6 +660,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
cls, con, tag, end, *end));
return 0;
}
+
+ /* general string */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit9"));
return 0;
@@ -630,13 +673,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
cFYI(1, ("Need to call asn1_octets_decode() function for %s",
ctx.pointer)); /* is this UTF-8 or ASCII? */
-
+decode_negtoken_exit:
if (use_kerberos)
*secType = Kerberos;
else if (use_mskerberos)
*secType = MSKerberos;
else if (use_ntlmssp)
- *secType = NTLMSSP;
+ *secType = RawNTLMSSP;
return 1;
}
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 7f19fef..42cec2a 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -261,6 +261,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&tcon->num_reads, 0);
atomic_set(&tcon->num_oplock_brks, 0);
atomic_set(&tcon->num_opens, 0);
+ atomic_set(&tcon->num_posixopens, 0);
+ atomic_set(&tcon->num_posixmkdirs, 0);
atomic_set(&tcon->num_closes, 0);
atomic_set(&tcon->num_deletes, 0);
atomic_set(&tcon->num_mkdirs, 0);
@@ -347,11 +349,15 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
atomic_read(&tcon->num_locks),
atomic_read(&tcon->num_hardlinks),
atomic_read(&tcon->num_symlinks));
- seq_printf(m, "\nOpens: %d Closes: %d"
+ seq_printf(m, "\nOpens: %d Closes: %d "
"Deletes: %d",
atomic_read(&tcon->num_opens),
atomic_read(&tcon->num_closes),
atomic_read(&tcon->num_deletes));
+ seq_printf(m, "\nPosix Opens: %d "
+ "Posix Mkdirs: %d",
+ atomic_read(&tcon->num_posixopens),
+ atomic_read(&tcon->num_posixmkdirs));
seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
atomic_read(&tcon->num_mkdirs),
atomic_read(&tcon->num_rmdirs));
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 3bb11be..606912d 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -55,7 +55,7 @@ void cifs_dfs_release_automount_timer(void)
* i.e. strips from UNC trailing path that is not part of share
* name and fixup missing '\' in the beginning of DFS node referral
* if necessary.
- * Returns pointer to share name on success or NULL on error.
+ * Returns pointer to share name on success or ERR_PTR on error.
* Caller is responsible for freeing returned string.
*/
static char *cifs_get_share_name(const char *node_name)
@@ -68,7 +68,7 @@ static char *cifs_get_share_name(const char *node_name)
UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
GFP_KERNEL);
if (!UNC)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* get share name and server name */
if (node_name[1] != '\\') {
@@ -87,7 +87,7 @@ static char *cifs_get_share_name(const char *node_name)
cERROR(1, ("%s: no server name end in node name: %s",
__func__, node_name));
kfree(UNC);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/* find sharename end */
@@ -133,6 +133,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
return ERR_PTR(-EINVAL);
*devname = cifs_get_share_name(ref->node_name);
+ if (IS_ERR(*devname)) {
+ rc = PTR_ERR(*devname);
+ *devname = NULL;
+ goto compose_mount_options_err;
+ }
+
rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
if (rc != 0) {
cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 4a4581c..051caec 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -86,6 +86,9 @@ struct key_type cifs_spnego_key_type = {
/* strlen of ";user=" */
#define USER_KEY_LEN 6
+/* strlen of ";pid=0x" */
+#define PID_KEY_LEN 7
+
/* get a key struct with a SPNEGO security blob, suitable for session setup */
struct key *
cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
@@ -103,7 +106,8 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
IP_KEY_LEN + INET6_ADDRSTRLEN +
MAX_MECH_STR_LEN +
UID_KEY_LEN + (sizeof(uid_t) * 2) +
- USER_KEY_LEN + strlen(sesInfo->userName) + 1;
+ USER_KEY_LEN + strlen(sesInfo->userName) +
+ PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
spnego_key = ERR_PTR(-ENOMEM);
description = kzalloc(desc_len, GFP_KERNEL);
@@ -141,6 +145,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
dp = description + strlen(description);
sprintf(dp, ";user=%s", sesInfo->userName);
+ dp = description + strlen(description);
+ sprintf(dp, ";pid=0x%x", current->pid);
+
cFYI(1, ("key description = %s", description));
spnego_key = request_key(&cifs_spnego_key_type, description, "");
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 60e3c42..714a542 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,7 +44,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
int maxwords = maxbytes / 2;
char tmp[NLS_MAX_CHARSET_SIZE];
- for (i = 0; from[i] && i < maxwords; i++) {
+ for (i = 0; i < maxwords && from[i]; i++) {
charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
NLS_MAX_CHARSET_SIZE);
if (charlen > 0)
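The operand swap matters: the old condition read from[i] before checking i < maxwords, so a string with no NUL terminator in the first maxwords words read one __le16 past the buffer. With the bound tested first, && short-circuits before the dereference:

	for (i = 0; i < maxwords && from[i]; i++)
		;	/* safe: the index is checked before from[i] is read */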
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 1403b5d..6941c22 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -327,7 +327,7 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
- struct inode *inode)
+ struct cifs_fattr *fattr)
{
int i;
int num_aces = 0;
@@ -340,7 +340,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
if (!pdacl) {
/* no DACL in the security descriptor, set
all the permissions for user/group/other */
- inode->i_mode |= S_IRWXUGO;
+ fattr->cf_mode |= S_IRWXUGO;
return;
}
@@ -357,7 +357,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
/* reset rwx permissions for user/group/other.
Also, if num_aces is 0 i.e. DACL has no ACEs,
user/group/other have no permissions */
- inode->i_mode &= ~(S_IRWXUGO);
+ fattr->cf_mode &= ~(S_IRWXUGO);
acl_base = (char *)pdacl;
acl_size = sizeof(struct cifs_acl);
@@ -379,17 +379,17 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
if (compare_sids(&(ppace[i]->sid), pownersid))
access_flags_to_mode(ppace[i]->access_req,
ppace[i]->type,
- &(inode->i_mode),
+ &fattr->cf_mode,
&user_mask);
if (compare_sids(&(ppace[i]->sid), pgrpsid))
access_flags_to_mode(ppace[i]->access_req,
ppace[i]->type,
- &(inode->i_mode),
+ &fattr->cf_mode,
&group_mask);
if (compare_sids(&(ppace[i]->sid), &sid_everyone))
access_flags_to_mode(ppace[i]->access_req,
ppace[i]->type,
- &(inode->i_mode),
+ &fattr->cf_mode,
&other_mask);
/* memcpy((void *)(&(cifscred->aces[i])),
@@ -464,7 +464,7 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
- struct inode *inode)
+ struct cifs_fattr *fattr)
{
int rc;
struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
@@ -472,7 +472,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
char *end_of_acl = ((char *)pntsd) + acl_len;
__u32 dacloffset;
- if ((inode == NULL) || (pntsd == NULL))
+ if (pntsd == NULL)
return -EIO;
owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
@@ -497,7 +497,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
if (dacloffset)
parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
- group_sid_ptr, inode);
+ group_sid_ptr, fattr);
else
cFYI(1, ("no ACL")); /* BB grant all or default perms? */
@@ -508,7 +508,6 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
sizeof(struct cifs_sid)); */
-
return 0;
}
@@ -671,8 +670,9 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
}
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
-void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode,
- const char *path, const __u16 *pfid)
+void
+cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+ struct inode *inode, const char *path, const __u16 *pfid)
{
struct cifs_ntsd *pntsd = NULL;
u32 acllen = 0;
@@ -687,7 +687,7 @@ void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode,
/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
if (pntsd)
- rc = parse_sec_desc(pntsd, acllen, inode);
+ rc = parse_sec_desc(pntsd, acllen, fattr);
if (rc)
cFYI(1, ("parse sec desc failed rc = %d", rc));
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0d92114..84b7525 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -308,7 +308,6 @@ cifs_alloc_inode(struct super_block *sb)
if (!cifs_inode)
return NULL;
cifs_inode->cifsAttrs = 0x20; /* default */
- atomic_set(&cifs_inode->inUse, 0);
cifs_inode->time = 0;
cifs_inode->write_behind_rc = 0;
/* Until the file is open and we have gotten oplock
@@ -333,6 +332,27 @@ cifs_destroy_inode(struct inode *inode)
kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
+static void
+cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
+{
+ seq_printf(s, ",addr=");
+
+ switch (server->addr.sockAddr.sin_family) {
+ case AF_INET:
+ seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ seq_printf(s, "%pI6",
+ &server->addr.sockAddr6.sin6_addr.s6_addr);
+ if (server->addr.sockAddr6.sin6_scope_id)
+ seq_printf(s, "%%%u",
+ server->addr.sockAddr6.sin6_scope_id);
+ break;
+ default:
+ seq_printf(s, "(unknown)");
+ }
+}
+
/*
* cifs_show_options() is for displaying mount options in /proc/mounts.
* Not all settable options are displayed but most of the important
@@ -343,83 +363,68 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
- struct TCP_Server_Info *server;
cifs_sb = CIFS_SB(m->mnt_sb);
+ tcon = cifs_sb->tcon;
- if (cifs_sb) {
- tcon = cifs_sb->tcon;
- if (tcon) {
- seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
- if (tcon->ses) {
- if (tcon->ses->userName)
- seq_printf(s, ",username=%s",
- tcon->ses->userName);
- if (tcon->ses->domainName)
- seq_printf(s, ",domain=%s",
- tcon->ses->domainName);
- server = tcon->ses->server;
- if (server) {
- seq_printf(s, ",addr=");
- switch (server->addr.sockAddr6.
- sin6_family) {
- case AF_INET6:
- seq_printf(s, "%pI6",
- &server->addr.sockAddr6.sin6_addr);
- break;
- case AF_INET:
- seq_printf(s, "%pI4",
- &server->addr.sockAddr.sin_addr.s_addr);
- break;
- }
- }
- }
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
- !(tcon->unix_ext))
- seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
- !(tcon->unix_ext))
- seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
- if (!tcon->unix_ext) {
- seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
+ seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
+ if (tcon->ses->userName)
+ seq_printf(s, ",username=%s", tcon->ses->userName);
+ if (tcon->ses->domainName)
+ seq_printf(s, ",domain=%s", tcon->ses->domainName);
+
+ seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+ seq_printf(s, ",forceuid");
+ else
+ seq_printf(s, ",noforceuid");
+
+ seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+ seq_printf(s, ",forcegid");
+ else
+ seq_printf(s, ",noforcegid");
+
+ cifs_show_address(s, tcon->ses->server);
+
+ if (!tcon->unix_ext)
+ seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
cifs_sb->mnt_file_mode,
cifs_sb->mnt_dir_mode);
- }
- if (tcon->seal)
- seq_printf(s, ",seal");
- if (tcon->nocase)
- seq_printf(s, ",nocase");
- if (tcon->retry)
- seq_printf(s, ",hard");
- }
- if (cifs_sb->prepath)
- seq_printf(s, ",prepath=%s", cifs_sb->prepath);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
- seq_printf(s, ",posixpaths");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
- seq_printf(s, ",setuids");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- seq_printf(s, ",serverino");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
- seq_printf(s, ",directio");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
- seq_printf(s, ",nouser_xattr");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
- seq_printf(s, ",mapchars");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
- seq_printf(s, ",sfu");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- seq_printf(s, ",nobrl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
- seq_printf(s, ",cifsacl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
- seq_printf(s, ",dynperm");
- if (m->mnt_sb->s_flags & MS_POSIXACL)
- seq_printf(s, ",acl");
-
- seq_printf(s, ",rsize=%d", cifs_sb->rsize);
- seq_printf(s, ",wsize=%d", cifs_sb->wsize);
- }
+ if (tcon->seal)
+ seq_printf(s, ",seal");
+ if (tcon->nocase)
+ seq_printf(s, ",nocase");
+ if (tcon->retry)
+ seq_printf(s, ",hard");
+ if (cifs_sb->prepath)
+ seq_printf(s, ",prepath=%s", cifs_sb->prepath);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+ seq_printf(s, ",posixpaths");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
+ seq_printf(s, ",setuids");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ seq_printf(s, ",serverino");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+ seq_printf(s, ",directio");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ seq_printf(s, ",nouser_xattr");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+ seq_printf(s, ",mapchars");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ seq_printf(s, ",sfu");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ seq_printf(s, ",nobrl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
+ seq_printf(s, ",cifsacl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ seq_printf(s, ",dynperm");
+ if (m->mnt_sb->s_flags & MS_POSIXACL)
+ seq_printf(s, ",acl");
+
+ seq_printf(s, ",rsize=%d", cifs_sb->rsize);
+ seq_printf(s, ",wsize=%d", cifs_sb->wsize);
+
return 0;
}
@@ -535,9 +540,14 @@ static void cifs_umount_begin(struct super_block *sb)
if (tcon == NULL)
return;
- lock_kernel();
read_lock(&cifs_tcp_ses_lock);
- if (tcon->tc_count == 1)
+ if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
+ /* we have other mounts to same share or we have
+ already tried to force umount this and woken up
+ all waiting network requests, nothing to do */
+ read_unlock(&cifs_tcp_ses_lock);
+ return;
+ } else if (tcon->tc_count == 1)
tcon->tidStatus = CifsExiting;
read_unlock(&cifs_tcp_ses_lock);
@@ -552,9 +562,7 @@ static void cifs_umount_begin(struct super_block *sb)
wake_up_all(&tcon->ses->server->response_q);
msleep(1);
}
-/* BB FIXME - finish add checks for tidStatus BB */
- unlock_kernel();
return;
}
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 9570a0e..6c17094 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -24,6 +24,19 @@
#define ROOT_I 2
+/*
+ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
+ * so that it will fit.
+ */
+static inline ino_t
+cifs_uniqueid_to_ino_t(u64 fileid)
+{
+ ino_t ino = (ino_t) fileid;
+ if (sizeof(ino_t) < sizeof(u64))
+ ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8;
+ return ino;
+}
+
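Worked example of the fold on a 32-bit arch, where sizeof(ino_t) == 4 and the shift is (8 - 4) * 8 == 32 bits. On 64-bit arches the if-branch is never taken and the cast is lossless:

	u64 fileid = 0x0000000500000002ULL;
	ino_t ino  = (ino_t)fileid;	/* 0x00000002 when ino_t is 32-bit */
	ino ^= fileid >> 32;		/* 0x00000002 ^ 0x00000005 == 0x00000007 */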
extern struct file_system_type cifs_fs_type;
extern const struct address_space_operations cifs_addr_ops;
extern const struct address_space_operations cifs_addr_ops_smallbuf;
@@ -100,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.59"
+#define CIFS_VERSION "1.60"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a61ab77..6084d63 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -83,7 +83,7 @@ enum securityEnum {
NTLM, /* Legacy NTLM012 auth with NTLM hash */
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
- NTLMSSP, /* NTLMSSP via SPNEGO, NTLMv2 hash */
+/* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */
Kerberos, /* Kerberos via SPNEGO */
MSKerberos, /* MS Kerberos via SPNEGO */
};
@@ -260,6 +260,8 @@ struct cifsTconInfo {
atomic_t num_closes;
atomic_t num_deletes;
atomic_t num_mkdirs;
+ atomic_t num_posixopens;
+ atomic_t num_posixmkdirs;
atomic_t num_rmdirs;
atomic_t num_renames;
atomic_t num_t2renames;
@@ -364,13 +366,13 @@ struct cifsInodeInfo {
struct list_head openFileList;
int write_behind_rc;
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
- atomic_t inUse; /* num concurrent users (local openers cifs) of file*/
unsigned long time; /* jiffies of last update/check of inode */
bool clientCanCacheRead:1; /* read oplock */
bool clientCanCacheAll:1; /* read and writebehind oplock */
bool oplockPending:1;
bool delete_pending:1; /* DELETE_ON_CLOSE is set */
u64 server_eof; /* current file size on server */
+ u64 uniqueid; /* server inode number */
struct inode vfs_inode;
};
@@ -472,6 +474,32 @@ struct dfs_info3_param {
char *node_name;
};
+/*
+ * common struct for holding inode info when searching for or updating an
+ * inode with new info
+ */
+
+#define CIFS_FATTR_DFS_REFERRAL 0x1
+#define CIFS_FATTR_DELETE_PENDING 0x2
+#define CIFS_FATTR_NEED_REVAL 0x4
+
+struct cifs_fattr {
+ u32 cf_flags;
+ u32 cf_cifsattrs;
+ u64 cf_uniqueid;
+ u64 cf_eof;
+ u64 cf_bytes;
+ uid_t cf_uid;
+ gid_t cf_gid;
+ umode_t cf_mode;
+ dev_t cf_rdev;
+ unsigned int cf_nlink;
+ unsigned int cf_dtype;
+ struct timespec cf_atime;
+ struct timespec cf_mtime;
+ struct timespec cf_ctime;
+};
+
static inline void free_dfs_info_param(struct dfs_info3_param *param)
{
if (param) {
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index a785f69..2d07f89 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -2328,19 +2328,7 @@ struct file_attrib_tag {
typedef struct {
__le32 NextEntryOffset;
__u32 ResumeKey; /* as with FileIndex - no need to convert */
- __le64 EndOfFile;
- __le64 NumOfBytes;
- __le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
- __le64 LastAccessTime;
- __le64 LastModificationTime;
- __le64 Uid;
- __le64 Gid;
- __le32 Type;
- __le64 DevMajor;
- __le64 DevMinor;
- __le64 UniqueId;
- __le64 Permissions;
- __le64 Nlinks;
+ FILE_UNIX_BASIC_INFO basic;
char FileName[1];
} __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f945232..da8fbf5 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -74,7 +74,7 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr);
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
enum securityEnum *secType);
-extern int cifs_inet_pton(const int, const char *source, void *dst);
+extern int cifs_convert_address(char *src, void *dst);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
@@ -98,9 +98,13 @@ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
extern int cifs_posix_open(char *full_path, struct inode **pinode,
struct super_block *sb, int mode, int oflags,
int *poplock, __u16 *pnetfid, int xid);
-extern void posix_fill_in_inode(struct inode *tmp_inode,
- FILE_UNIX_BASIC_INFO *pData, int isNewInode);
-extern struct inode *cifs_new_inode(struct super_block *sb, __u64 *inum);
+extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
+ FILE_UNIX_BASIC_INFO *info,
+ struct cifs_sb_info *cifs_sb);
+extern void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
+extern struct inode *cifs_iget(struct super_block *sb,
+ struct cifs_fattr *fattr);
+
extern int cifs_get_inode_info(struct inode **pinode,
const unsigned char *search_path,
FILE_ALL_INFO *pfile_info,
@@ -108,8 +112,9 @@ extern int cifs_get_inode_info(struct inode **pinode,
extern int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path,
struct super_block *sb, int xid);
-extern void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode,
- const char *path, const __u16 *pfid);
+extern void cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
+ struct cifs_fattr *fattr, struct inode *inode,
+ const char *path, const __u16 *pfid);
extern int mode_to_acl(struct inode *inode, const char *path, __u64);
extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
@@ -215,7 +220,11 @@ struct cifs_unix_set_info_args {
dev_t device;
};
-extern int CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *pTcon,
+extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+ const struct cifs_unix_set_info_args *args,
+ u16 fid, u32 pid_of_opener);
+
+extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon,
char *fileName,
const struct cifs_unix_set_info_args *args,
const struct nls_table *nls_codepage,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b84c61d..1866bc2 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -594,7 +594,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
else if (secFlags & CIFSSEC_MAY_KRB5)
server->secType = Kerberos;
else if (secFlags & CIFSSEC_MAY_NTLMSSP)
- server->secType = NTLMSSP;
+ server->secType = RawNTLMSSP;
else if (secFlags & CIFSSEC_MAY_LANMAN)
server->secType = LANMAN;
/* #ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -729,7 +729,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
* the tcon is no longer on the list, so no need to take lock before
* checking this.
*/
- if (tcon->need_reconnect)
+ if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
@@ -1113,7 +1113,10 @@ PsxCreat:
psx_create_err:
cifs_buf_release(pSMB);
- cifs_stats_inc(&tcon->num_mkdirs);
+ if (posix_flags & SMB_O_DIRECTORY)
+ cifs_stats_inc(&tcon->num_posixmkdirs);
+ else
+ cifs_stats_inc(&tcon->num_posixopens);
if (rc == -EAGAIN)
goto PsxCreat;
@@ -5074,10 +5077,114 @@ SetAttrLgcyRetry:
}
#endif /* temporarily unneeded SetAttr legacy function */
+static void
+cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
+ const struct cifs_unix_set_info_args *args)
+{
+ u64 mode = args->mode;
+
+ /*
+ * Samba server ignores set of file size to zero due to bugs in some
+ * older clients, but we should be precise - we use SetFileSize to
+ * set file size and do not want to truncate file size to zero
+ * accidentally as happened on one Samba server beta by putting
+ * zero instead of -1 here
+ */
+ data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
+ data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64);
+ data_offset->LastStatusChange = cpu_to_le64(args->ctime);
+ data_offset->LastAccessTime = cpu_to_le64(args->atime);
+ data_offset->LastModificationTime = cpu_to_le64(args->mtime);
+ data_offset->Uid = cpu_to_le64(args->uid);
+ data_offset->Gid = cpu_to_le64(args->gid);
+ /* better to leave device as zero when it is */
+ data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
+ data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
+ data_offset->Permissions = cpu_to_le64(mode);
+
+ if (S_ISREG(mode))
+ data_offset->Type = cpu_to_le32(UNIX_FILE);
+ else if (S_ISDIR(mode))
+ data_offset->Type = cpu_to_le32(UNIX_DIR);
+ else if (S_ISLNK(mode))
+ data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
+ else if (S_ISCHR(mode))
+ data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
+ else if (S_ISBLK(mode))
+ data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
+ else if (S_ISFIFO(mode))
+ data_offset->Type = cpu_to_le32(UNIX_FIFO);
+ else if (S_ISSOCK(mode))
+ data_offset->Type = cpu_to_le32(UNIX_SOCKET);
+}
+
int
-CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
- const struct cifs_unix_set_info_args *args,
- const struct nls_table *nls_codepage, int remap)
+CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+ const struct cifs_unix_set_info_args *args,
+ u16 fid, u32 pid_of_opener)
+{
+ struct smb_com_transaction2_sfi_req *pSMB = NULL;
+ FILE_UNIX_BASIC_INFO *data_offset;
+ int rc = 0;
+ u16 params, param_offset, offset, byte_count, count;
+
+ cFYI(1, ("Set Unix Info (via SetFileInfo)"));
+ rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+
+ if (rc)
+ return rc;
+
+ pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
+ pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
+
+ params = 6;
+ pSMB->MaxSetupCount = 0;
+ pSMB->Reserved = 0;
+ pSMB->Flags = 0;
+ pSMB->Timeout = 0;
+ pSMB->Reserved2 = 0;
+ param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+ offset = param_offset + params;
+
+ data_offset = (FILE_UNIX_BASIC_INFO *)
+ ((char *)(&pSMB->hdr.Protocol) + offset);
+ count = sizeof(FILE_UNIX_BASIC_INFO);
+
+ pSMB->MaxParameterCount = cpu_to_le16(2);
+ /* BB find max SMB PDU from sess */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
+ pSMB->SetupCount = 1;
+ pSMB->Reserved3 = 0;
+ pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+ byte_count = 3 /* pad */ + params + count;
+ pSMB->DataCount = cpu_to_le16(count);
+ pSMB->ParameterCount = cpu_to_le16(params);
+ pSMB->TotalDataCount = pSMB->DataCount;
+ pSMB->TotalParameterCount = pSMB->ParameterCount;
+ pSMB->ParameterOffset = cpu_to_le16(param_offset);
+ pSMB->DataOffset = cpu_to_le16(offset);
+ pSMB->Fid = fid;
+ pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
+ pSMB->Reserved4 = 0;
+ pSMB->hdr.smb_buf_length += byte_count;
+ pSMB->ByteCount = cpu_to_le16(byte_count);
+
+ cifs_fill_unix_set_info(data_offset, args);
+
+ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ if (rc)
+ cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
+
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
+ since the file handle passed in is no longer valid */
+
+ return rc;
+}
+
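A hypothetical caller of the new handle-based variant, changing only the mode and leaving everything else untouched. The field names come from struct cifs_unix_set_info_args as consumed by cifs_fill_unix_set_info() above; NO_CHANGE_64 is the "leave as is" sentinel the patch already uses:

	struct cifs_unix_set_info_args args = {
		.mode	= mode,
		.uid	= NO_CHANGE_64,
		.gid	= NO_CHANGE_64,
		.ctime	= NO_CHANGE_64,
		.atime	= NO_CHANGE_64,
		.mtime	= NO_CHANGE_64,
		.device	= 0,
	};

	rc = CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid, current->tgid);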
+int
+CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
+ const struct cifs_unix_set_info_args *args,
+ const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -5086,7 +5193,6 @@ CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
int bytes_returned = 0;
FILE_UNIX_BASIC_INFO *data_offset;
__u16 params, param_offset, offset, count, byte_count;
- __u64 mode = args->mode;
cFYI(1, ("In SetUID/GID/Mode"));
setPermsRetry:
@@ -5137,38 +5243,8 @@ setPermsRetry:
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
pSMB->Reserved4 = 0;
pSMB->hdr.smb_buf_length += byte_count;
- /* Samba server ignores set of file size to zero due to bugs in some
- older clients, but we should be precise - we use SetFileSize to
- set file size and do not want to truncate file size to zero
- accidently as happened on one Samba server beta by putting
- zero instead of -1 here */
- data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
- data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64);
- data_offset->LastStatusChange = cpu_to_le64(args->ctime);
- data_offset->LastAccessTime = cpu_to_le64(args->atime);
- data_offset->LastModificationTime = cpu_to_le64(args->mtime);
- data_offset->Uid = cpu_to_le64(args->uid);
- data_offset->Gid = cpu_to_le64(args->gid);
- /* better to leave device as zero when it is */
- data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
- data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
- data_offset->Permissions = cpu_to_le64(mode);
-
- if (S_ISREG(mode))
- data_offset->Type = cpu_to_le32(UNIX_FILE);
- else if (S_ISDIR(mode))
- data_offset->Type = cpu_to_le32(UNIX_DIR);
- else if (S_ISLNK(mode))
- data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
- else if (S_ISCHR(mode))
- data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
- else if (S_ISBLK(mode))
- data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
- else if (S_ISFIFO(mode))
- data_offset->Type = cpu_to_le32(UNIX_FIFO);
- else if (S_ISSOCK(mode))
- data_offset->Type = cpu_to_le32(UNIX_SOCKET);
+ cifs_fill_unix_set_info(data_offset, args);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 97f4311..1f3345d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -70,7 +70,6 @@ struct smb_vol {
mode_t file_mode;
mode_t dir_mode;
unsigned secFlg;
- bool rw:1;
bool retry:1;
bool intr:1;
bool setuids:1;
@@ -804,6 +803,10 @@ cifs_parse_mount_options(char *options, const char *devname,
char *data;
unsigned int temp_len, i, j;
char separator[2];
+ short int override_uid = -1;
+ short int override_gid = -1;
+ bool uid_specified = false;
+ bool gid_specified = false;
separator[0] = ',';
separator[1] = 0;
@@ -832,7 +835,6 @@ cifs_parse_mount_options(char *options, const char *devname,
vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
- vol->rw = true;
/* default is always to request posix paths. */
vol->posix_paths = 1;
/* default to using server inode numbers where available */
@@ -1095,18 +1097,20 @@ cifs_parse_mount_options(char *options, const char *devname,
"too long.\n");
return 1;
}
- } else if (strnicmp(data, "uid", 3) == 0) {
- if (value && *value)
- vol->linux_uid =
- simple_strtoul(value, &value, 0);
- } else if (strnicmp(data, "forceuid", 8) == 0) {
- vol->override_uid = 1;
- } else if (strnicmp(data, "gid", 3) == 0) {
- if (value && *value)
- vol->linux_gid =
- simple_strtoul(value, &value, 0);
- } else if (strnicmp(data, "forcegid", 8) == 0) {
- vol->override_gid = 1;
+ } else if (!strnicmp(data, "uid", 3) && value && *value) {
+ vol->linux_uid = simple_strtoul(value, &value, 0);
+ uid_specified = true;
+ } else if (!strnicmp(data, "forceuid", 8)) {
+ override_uid = 1;
+ } else if (!strnicmp(data, "noforceuid", 10)) {
+ override_uid = 0;
+ } else if (!strnicmp(data, "gid", 3) && value && *value) {
+ vol->linux_gid = simple_strtoul(value, &value, 0);
+ gid_specified = true;
+ } else if (!strnicmp(data, "forcegid", 8)) {
+ override_gid = 1;
+ } else if (!strnicmp(data, "noforcegid", 10)) {
+ override_gid = 0;
} else if (strnicmp(data, "file_mode", 4) == 0) {
if (value && *value) {
vol->file_mode =
@@ -1199,7 +1203,9 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(data, "guest", 5) == 0) {
/* ignore */
} else if (strnicmp(data, "rw", 2) == 0) {
- vol->rw = true;
+ /* ignore */
+ } else if (strnicmp(data, "ro", 2) == 0) {
+ /* ignore */
} else if (strnicmp(data, "noblocksend", 11) == 0) {
vol->noblocksnd = 1;
} else if (strnicmp(data, "noautotune", 10) == 0) {
@@ -1218,8 +1224,6 @@ cifs_parse_mount_options(char *options, const char *devname,
parse these options again and set anything and it
is ok to just ignore them */
continue;
- } else if (strnicmp(data, "ro", 2) == 0) {
- vol->rw = false;
} else if (strnicmp(data, "hard", 4) == 0) {
vol->retry = 1;
} else if (strnicmp(data, "soft", 4) == 0) {
@@ -1357,6 +1361,18 @@ cifs_parse_mount_options(char *options, const char *devname,
if (vol->UNCip == NULL)
vol->UNCip = &vol->UNC[2];
+ if (uid_specified)
+ vol->override_uid = override_uid;
+ else if (override_uid == 1)
+ printk(KERN_NOTICE "CIFS: ignoring forceuid mount option "
+ "specified with no uid= option.\n");
+
+ if (gid_specified)
+ vol->override_gid = override_gid;
+ else if (override_gid == 1)
+ printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
+ "specified with no gid= option.\n");
+
return 0;
}
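The local override flags above are deliberately tri-state, which is why they are short int initialized to -1 rather than bool. Restated as a sketch:

	/* states of override_uid/override_gid as used above:
	 *   -1  neither forceuid nor noforceuid seen: keep the default
	 *    0  noforceuid seen last: disable the override
	 *    1  forceuid seen last: enable it (only honored with uid=)
	 * the last occurrence wins, since each match overwrites the
	 * variable; a bool could not distinguish "unset" from "off" */
	short int override_uid = -1;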
@@ -1386,8 +1402,10 @@ cifs_find_tcp_session(struct sockaddr_storage *addr)
server->addr.sockAddr.sin_addr.s_addr))
continue;
else if (addr->ss_family == AF_INET6 &&
- !ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
- &addr6->sin6_addr))
+ (!ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
+ &addr6->sin6_addr) ||
+ server->addr.sockAddr6.sin6_scope_id !=
+ addr6->sin6_scope_id))
continue;
++server->srv_count;
@@ -1433,28 +1451,15 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
memset(&addr, 0, sizeof(struct sockaddr_storage));
- if (volume_info->UNCip && volume_info->UNC) {
- rc = cifs_inet_pton(AF_INET, volume_info->UNCip,
- &sin_server->sin_addr.s_addr);
-
- if (rc <= 0) {
- /* not ipv4 address, try ipv6 */
- rc = cifs_inet_pton(AF_INET6, volume_info->UNCip,
- &sin_server6->sin6_addr.in6_u);
- if (rc > 0)
- addr.ss_family = AF_INET6;
- } else {
- addr.ss_family = AF_INET;
- }
+ cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip));
- if (rc <= 0) {
+ if (volume_info->UNCip && volume_info->UNC) {
+ rc = cifs_convert_address(volume_info->UNCip, &addr);
+ if (!rc) {
/* we failed translating address */
rc = -EINVAL;
goto out_err;
}
-
- cFYI(1, ("UNC: %s ip: %s", volume_info->UNC,
- volume_info->UNCip));
} else if (volume_info->UNCip) {
/* BB using ip addr as tcp_ses name to connect to the
DFS root below */
@@ -1513,14 +1518,14 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
cFYI(1, ("attempting ipv6 connect"));
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
+ sin_server6->sin6_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr6, sin_server6,
sizeof(struct sockaddr_in6));
- sin_server6->sin6_port = htons(volume_info->port);
rc = ipv6_connect(tcp_ses);
} else {
+ sin_server->sin_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr, sin_server,
sizeof(struct sockaddr_in));
- sin_server->sin_port = htons(volume_info->port);
rc = ipv4_connect(tcp_ses);
}
if (rc < 0) {
@@ -2465,10 +2470,10 @@ try_mount_again:
tcon->local_lease = volume_info->local_lease;
}
if (pSesInfo) {
- if (pSesInfo->capabilities & CAP_LARGE_FILES) {
- sb->s_maxbytes = (u64) 1 << 63;
- } else
- sb->s_maxbytes = (u64) 1 << 31; /* 2 GB */
+ if (pSesInfo->capabilities & CAP_LARGE_FILES)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ else
+ sb->s_maxbytes = MAX_NON_LFS;
}
/* BB FIXME fix time_gran to be larger for LANMAN sessions */
@@ -2557,11 +2562,20 @@ remote_path_check:
if (mount_data != mount_data_global)
kfree(mount_data);
+
mount_data = cifs_compose_mount_options(
cifs_sb->mountdata, full_path + 1,
referrals, &fake_devname);
- kfree(fake_devname);
+
free_dfs_info_array(referrals, num_referrals);
+ kfree(fake_devname);
+ kfree(full_path);
+
+ if (IS_ERR(mount_data)) {
+ rc = PTR_ERR(mount_data);
+ mount_data = NULL;
+ goto mount_fail_check;
+ }
if (tcon)
cifs_put_tcon(tcon);
@@ -2569,8 +2583,6 @@ remote_path_check:
cifs_put_smb_ses(pSesInfo);
cleanup_volume_info(&volume_info);
- FreeXid(xid);
- kfree(full_path);
referral_walks_count++;
goto try_mount_again;
}
@@ -2739,6 +2751,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
/* mostly informational -- no need to fail on error here */
+ kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr,
bytes_left, is_unicode,
nls_codepage);
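Two connect.c hunks above merit a note. The sin6_port/memcpy reorder fixes a real ordering bug: the connect helpers appear to read the address from the copy stored in tcp_ses (which is what the reorder implies), so a port assigned to the local sockaddr after the memcpy was lost. Sketch of the corrected sequence:

	/* fill in the port before the copy is taken */
	sin_server6->sin6_port = htons(volume_info->port);
	memcpy(&tcp_ses->addr.sockAddr6, sin_server6,
	       sizeof(struct sockaddr_in6));
	rc = ipv6_connect(tcp_ses);	/* reads tcp_ses->addr, port now set */

Likewise, the kfree() added before reassigning tcon->nativeFileSystem plugs a small leak when a tree connect is reissued on an existing tcon.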
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3758965d..4326ffd 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -188,6 +188,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
FILE_UNIX_BASIC_INFO *presp_data;
__u32 posix_flags = 0;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_fattr fattr;
cFYI(1, ("posix open %s", full_path));
@@ -236,22 +237,21 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
if (presp_data->Type == cpu_to_le32(-1))
goto posix_open_ret; /* open ok, caller does qpathinfo */
- /* get new inode and set it up */
if (!pinode)
goto posix_open_ret; /* caller does not need info */
+ cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
+
+ /* get new inode and set it up */
if (*pinode == NULL) {
- __u64 unique_id = le64_to_cpu(presp_data->UniqueId);
- *pinode = cifs_new_inode(sb, &unique_id);
+ *pinode = cifs_iget(sb, &fattr);
+ if (!*pinode) {
+ rc = -ENOMEM;
+ goto posix_open_ret;
+ }
+ } else {
+ cifs_fattr_to_inode(*pinode, &fattr);
}
- /* else an inode was passed in. Update its info, don't create one */
-
- /* We do not need to close the file if new_inode fails since
- the caller will retry qpathinfo as long as inode is null */
- if (*pinode == NULL)
- goto posix_open_ret;
-
- posix_fill_in_inode(*pinode, presp_data, 1);
cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only);
@@ -307,8 +307,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if (oplockEnabled)
@@ -424,9 +425,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
- CIFSSMBUnixSetInfo(xid, tcon, full_path, &args,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
/* BB implement mode setting via Windows security
descriptors e.g. */
@@ -514,10 +516,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
- rc = CIFSSMBUnixSetInfo(xid, pTcon, full_path,
- &args, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
+ rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
if (!rc) {
rc = cifs_get_inode_info_unix(&newinode, full_path,
@@ -540,8 +542,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
kfree(full_path);
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
rc = CIFSSMBOpen(xid, pTcon, full_path,
@@ -641,6 +644,15 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
}
+ /*
+ * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
+ * the VFS handle the create.
+ */
+ if (nd->flags & LOOKUP_EXCL) {
+ d_instantiate(direntry, NULL);
+ return 0;
+ }
+
/* can not grab the rename sem here since it would
deadlock in the cases (beginning of sys_rename itself)
in which we already have the sb rename sem */
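The posix-open rework above routes the server's reply through the new fattr helpers instead of hand-rolling inode setup. Condensed, the flow is:

	/* hedged sketch: one conversion of the wire reply, two consumers */
	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	if (*pinode == NULL)
		*pinode = cifs_iget(sb, &fattr);      /* allocate + hash */
	else
		cifs_fattr_to_inode(*pinode, &fattr); /* refresh in place */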
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index df4a306..8794814 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -35,26 +35,11 @@
* 0 - name is not IP
*/
static int
-is_ip(const char *name)
+is_ip(char *name)
{
- int rc;
- struct sockaddr_in sin_server;
- struct sockaddr_in6 sin_server6;
-
- rc = cifs_inet_pton(AF_INET, name,
- &sin_server.sin_addr.s_addr);
-
- if (rc <= 0) {
- /* not ipv4 address, try ipv6 */
- rc = cifs_inet_pton(AF_INET6, name,
- &sin_server6.sin6_addr.in6_u);
- if (rc > 0)
- return 1;
- } else {
- return 1;
- }
- /* we failed translating address */
- return 0;
+ struct sockaddr_storage ss;
+
+ return cifs_convert_address(name, &ss);
}
static int
@@ -72,7 +57,7 @@ dns_resolver_instantiate(struct key *key, const void *data,
ip[datalen] = '\0';
/* make sure this looks like an address */
- if (!is_ip((const char *) ip)) {
+ if (!is_ip(ip)) {
kfree(ip);
return -EINVAL;
}
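Note the signature change from const char *name to char *name: it is not cosmetic. As the netmisc.c hunk later in this patch shows, cifs_convert_address() temporarily writes a NUL over any '%' sign while parsing an IPv6 scope id, so it needs a writable buffer:

	/* excerpted from cifs_convert_address() below */
	pct = strchr(src, '%');
	if (pct)
		*pct = '\0';	/* scribbles on the caller's string ... */
	rc = cifs_inet_pton(AF_INET6, src, &s6->sin6_addr.s6_addr);
	if (pct)
		*pct++ = '%';	/* ... and repairs it afterwards */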
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0686684..c34b7f8 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -300,14 +300,16 @@ int cifs_open(struct inode *inode, struct file *file)
pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
pCifsFile = cifs_fill_filedata(file);
if (pCifsFile) {
+ rc = 0;
FreeXid(xid);
- return 0;
+ return rc;
}
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
@@ -446,9 +448,9 @@ int cifs_open(struct inode *inode, struct file *file)
.mtime = NO_CHANGE_64,
.device = 0,
};
- CIFSSMBUnixSetInfo(xid, tcon, full_path, &args,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
}
}
@@ -491,11 +493,12 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
return -EBADF;
xid = GetXid();
- mutex_unlock(&pCifsFile->fh_mutex);
+ mutex_lock(&pCifsFile->fh_mutex);
if (!pCifsFile->invalidHandle) {
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
+ rc = 0;
FreeXid(xid);
- return 0;
+ return rc;
}
if (file->f_path.dentry == NULL) {
@@ -524,7 +527,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
if (full_path == NULL) {
rc = -ENOMEM;
reopen_error_exit:
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
FreeXid(xid);
return rc;
}
@@ -566,14 +569,14 @@ reopen_error_exit:
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
cFYI(1, ("cifs_open returned 0x%x", rc));
cFYI(1, ("oplock: %d", oplock));
} else {
reopen_success:
pCifsFile->netfid = netfid;
pCifsFile->invalidHandle = false;
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
pCifsInode = CIFS_I(inode);
if (pCifsInode) {
if (can_flush) {
@@ -845,8 +848,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
tcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
@@ -1805,8 +1809,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
@@ -1885,8 +1890,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
@@ -2019,8 +2025,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
xid = GetXid();
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -2185,8 +2192,9 @@ static int cifs_readpage(struct file *file, struct page *page)
xid = GetXid();
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
cFYI(1, ("readpage %p at offset %d 0x%x\n",
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index fad882b..82d83839 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -77,239 +77,202 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
}
}
-static void cifs_unix_info_to_inode(struct inode *inode,
- FILE_UNIX_BASIC_INFO *info, int force_uid_gid)
+/* populate an inode with info from a cifs_fattr struct */
+void
+cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
{
+ struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
- __u64 num_of_bytes = le64_to_cpu(info->NumOfBytes);
- __u64 end_of_file = le64_to_cpu(info->EndOfFile);
+ unsigned long oldtime = cifs_i->time;
+
+ inode->i_atime = fattr->cf_atime;
+ inode->i_mtime = fattr->cf_mtime;
+ inode->i_ctime = fattr->cf_ctime;
+ inode->i_rdev = fattr->cf_rdev;
+ inode->i_nlink = fattr->cf_nlink;
+ inode->i_uid = fattr->cf_uid;
+ inode->i_gid = fattr->cf_gid;
+
+ /* if dynperm is set, don't clobber existing mode */
+ if (inode->i_state & I_NEW ||
+ !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
+ inode->i_mode = fattr->cf_mode;
+
+ cifs_i->cifsAttrs = fattr->cf_cifsattrs;
+ cifs_i->uniqueid = fattr->cf_uniqueid;
+
+ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+ cifs_i->time = 0;
+ else
+ cifs_i->time = jiffies;
+
+ cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode,
+ oldtime, cifs_i->time));
- inode->i_atime = cifs_NTtimeToUnix(info->LastAccessTime);
- inode->i_mtime =
- cifs_NTtimeToUnix(info->LastModificationTime);
- inode->i_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
- inode->i_mode = le64_to_cpu(info->Permissions);
+ cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
+
+ /*
+ * Can't safely change the file size here if the client is writing to
+ * it due to potential races.
+ */
+ spin_lock(&inode->i_lock);
+ if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
+ i_size_write(inode, fattr->cf_eof);
+
+ /*
+ * i_blocks is not derived from (i_size / i_blksize);
+ * a fixed 512-byte (2**9) unit must be used when
+ * calculating the number of blocks.
+ */
+ inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
+ }
+ spin_unlock(&inode->i_lock);
+
+ cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL);
+}
+
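cifs_fattr_to_inode() is the keystone of this refactor: every attribute source is first reduced to a struct cifs_fattr, and only this one function mutates the inode. Every caller now follows the same two-step pipeline:

	/* wire buffer -> cifs_fattr -> inode, in two explicit steps */
	struct cifs_fattr fattr;

	cifs_unix_basic_to_fattr(&fattr, info, cifs_sb);	/* parse */
	cifs_fattr_to_inode(inode, &fattr);			/* apply */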
+/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
+void
+cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
+ struct cifs_sb_info *cifs_sb)
+{
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
+ fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
+ fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+
+ fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+ fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
+ fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
+ fattr->cf_mode = le64_to_cpu(info->Permissions);
/*
* Since we set the inode type below we need to mask off
* to avoid strange results if bits set above.
*/
- inode->i_mode &= ~S_IFMT;
+ fattr->cf_mode &= ~S_IFMT;
switch (le32_to_cpu(info->Type)) {
case UNIX_FILE:
- inode->i_mode |= S_IFREG;
+ fattr->cf_mode |= S_IFREG;
+ fattr->cf_dtype = DT_REG;
break;
case UNIX_SYMLINK:
- inode->i_mode |= S_IFLNK;
+ fattr->cf_mode |= S_IFLNK;
+ fattr->cf_dtype = DT_LNK;
break;
case UNIX_DIR:
- inode->i_mode |= S_IFDIR;
+ fattr->cf_mode |= S_IFDIR;
+ fattr->cf_dtype = DT_DIR;
break;
case UNIX_CHARDEV:
- inode->i_mode |= S_IFCHR;
- inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
- le64_to_cpu(info->DevMinor) & MINORMASK);
+ fattr->cf_mode |= S_IFCHR;
+ fattr->cf_dtype = DT_CHR;
+ fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+ le64_to_cpu(info->DevMinor) & MINORMASK);
break;
case UNIX_BLOCKDEV:
- inode->i_mode |= S_IFBLK;
- inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
- le64_to_cpu(info->DevMinor) & MINORMASK);
+ fattr->cf_mode |= S_IFBLK;
+ fattr->cf_dtype = DT_BLK;
+ fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+ le64_to_cpu(info->DevMinor) & MINORMASK);
break;
case UNIX_FIFO:
- inode->i_mode |= S_IFIFO;
+ fattr->cf_mode |= S_IFIFO;
+ fattr->cf_dtype = DT_FIFO;
break;
case UNIX_SOCKET:
- inode->i_mode |= S_IFSOCK;
+ fattr->cf_mode |= S_IFSOCK;
+ fattr->cf_dtype = DT_SOCK;
break;
default:
/* safest to call it a file if we do not know */
- inode->i_mode |= S_IFREG;
+ fattr->cf_mode |= S_IFREG;
+ fattr->cf_dtype = DT_REG;
cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
break;
}
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) &&
- !force_uid_gid)
- inode->i_uid = cifs_sb->mnt_uid;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+ fattr->cf_uid = cifs_sb->mnt_uid;
else
- inode->i_uid = le64_to_cpu(info->Uid);
+ fattr->cf_uid = le64_to_cpu(info->Uid);
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) &&
- !force_uid_gid)
- inode->i_gid = cifs_sb->mnt_gid;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+ fattr->cf_gid = cifs_sb->mnt_gid;
else
- inode->i_gid = le64_to_cpu(info->Gid);
-
- inode->i_nlink = le64_to_cpu(info->Nlinks);
-
- cifsInfo->server_eof = end_of_file;
- spin_lock(&inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /*
- * We can not safely change the file size here if the client
- * is writing to it due to potential races.
- */
- i_size_write(inode, end_of_file);
+ fattr->cf_gid = le64_to_cpu(info->Gid);
- /*
- * i_blocks is not related to (i_size / i_blksize),
- * but instead 512 byte (2**9) size is required for
- * calculating num blocks.
- */
- inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
- }
- spin_unlock(&inode->i_lock);
+ fattr->cf_nlink = le64_to_cpu(info->Nlinks);
}
-
/*
- * Needed to setup inode data for the directory which is the
- * junction to the new submount (ie to setup the fake directory
- * which represents a DFS referral)
- */
-static void fill_fake_finddataunix(FILE_UNIX_BASIC_INFO *pfnd_dat,
- struct super_block *sb)
-{
- struct inode *pinode = NULL;
-
- memset(pfnd_dat, 0, sizeof(FILE_UNIX_BASIC_INFO));
-
-/* __le64 pfnd_dat->EndOfFile = cpu_to_le64(0);
- __le64 pfnd_dat->NumOfBytes = cpu_to_le64(0);
- __u64 UniqueId = 0; */
- pfnd_dat->LastStatusChange =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->LastAccessTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->LastModificationTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->Type = cpu_to_le32(UNIX_DIR);
- pfnd_dat->Permissions = cpu_to_le64(S_IXUGO | S_IRWXU);
- pfnd_dat->Nlinks = cpu_to_le64(2);
- if (sb->s_root)
- pinode = sb->s_root->d_inode;
- if (pinode == NULL)
- return;
-
- /* fill in default values for the remaining based on root
- inode since we can not query the server for this inode info */
- pfnd_dat->DevMajor = cpu_to_le64(MAJOR(pinode->i_rdev));
- pfnd_dat->DevMinor = cpu_to_le64(MINOR(pinode->i_rdev));
- pfnd_dat->Uid = cpu_to_le64(pinode->i_uid);
- pfnd_dat->Gid = cpu_to_le64(pinode->i_gid);
-}
-
-/**
- * cifs_new inode - create new inode, initialize, and hash it
- * @sb - pointer to superblock
- * @inum - if valid pointer and serverino is enabled, replace i_ino with val
- *
- * Create a new inode, initialize it for CIFS and hash it. Returns the new
- * inode or NULL if one couldn't be allocated.
+ * Fill a cifs_fattr struct with fake inode info.
*
- * If the share isn't mounted with "serverino" or inum is a NULL pointer then
- * we'll just use the inode number assigned by new_inode(). Note that this can
- * mean i_ino collisions since the i_ino assigned by new_inode is not
- * guaranteed to be unique.
+ * Needed to setup cifs_fattr data for the directory which is the
+ * junction to the new submount (ie to setup the fake directory
+ * which represents a DFS referral).
*/
-struct inode *
-cifs_new_inode(struct super_block *sb, __u64 *inum)
+static void
+cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
{
- struct inode *inode;
-
- inode = new_inode(sb);
- if (inode == NULL)
- return NULL;
-
- /*
- * BB: Is i_ino == 0 legal? Here, we assume that it is. If it isn't we
- * stop passing inum as ptr. Are there sanity checks we can use to
- * ensure that the server is really filling in that field? Also,
- * if serverino is disabled, perhaps we should be using iunique()?
- */
- if (inum && (CIFS_SB(sb)->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
- inode->i_ino = (unsigned long) *inum;
-
- /*
- * must set this here instead of cifs_alloc_inode since VFS will
- * clobber i_flags
- */
- if (sb->s_flags & MS_NOATIME)
- inode->i_flags |= S_NOATIME | S_NOCMTIME;
-
- insert_inode_hash(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- return inode;
+ cFYI(1, ("creating fake fattr for DFS referral"));
+
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
+ fattr->cf_uid = cifs_sb->mnt_uid;
+ fattr->cf_gid = cifs_sb->mnt_gid;
+ fattr->cf_atime = CURRENT_TIME;
+ fattr->cf_ctime = CURRENT_TIME;
+ fattr->cf_mtime = CURRENT_TIME;
+ fattr->cf_nlink = 2;
+ fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
}
int cifs_get_inode_info_unix(struct inode **pinode,
- const unsigned char *full_path, struct super_block *sb, int xid)
+ const unsigned char *full_path,
+ struct super_block *sb, int xid)
{
- int rc = 0;
+ int rc;
FILE_UNIX_BASIC_INFO find_data;
- struct cifsTconInfo *pTcon;
- struct inode *inode;
+ struct cifs_fattr fattr;
+ struct cifsTconInfo *tcon;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- bool is_dfs_referral = false;
- struct cifsInodeInfo *cifsInfo;
- __u64 num_of_bytes;
- __u64 end_of_file;
- pTcon = cifs_sb->tcon;
+ tcon = cifs_sb->tcon;
cFYI(1, ("Getting info on %s", full_path));
/* could have done a find first instead but this returns more info */
- rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &find_data,
+ rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc == -EREMOTE && !is_dfs_referral) {
- is_dfs_referral = true;
- cFYI(DBG2, ("DFS ref"));
- /* for DFS, server does not give us real inode data */
- fill_fake_finddataunix(&find_data, sb);
- rc = 0;
- } else if (rc)
- goto cgiiu_exit;
- num_of_bytes = le64_to_cpu(find_data.NumOfBytes);
- end_of_file = le64_to_cpu(find_data.EndOfFile);
+ if (!rc) {
+ cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
+ } else if (rc == -EREMOTE) {
+ cifs_create_dfs_fattr(&fattr, sb);
+ rc = 0;
+ } else {
+ return rc;
+ }
- /* get new inode */
if (*pinode == NULL) {
- __u64 unique_id = le64_to_cpu(find_data.UniqueId);
- *pinode = cifs_new_inode(sb, &unique_id);
- if (*pinode == NULL) {
+ /* get new inode */
+ *pinode = cifs_iget(sb, &fattr);
+ if (!*pinode)
rc = -ENOMEM;
- goto cgiiu_exit;
- }
+ } else {
+ /* we already have inode, update it */
+ cifs_fattr_to_inode(*pinode, &fattr);
}
- inode = *pinode;
- cifsInfo = CIFS_I(inode);
-
- cFYI(1, ("Old time %ld", cifsInfo->time));
- cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld", cifsInfo->time));
- /* this is ok to set on every inode revalidate */
- atomic_set(&cifsInfo->inUse, 1);
-
- cifs_unix_info_to_inode(inode, &find_data, 0);
-
- if (num_of_bytes < end_of_file)
- cFYI(1, ("allocation size less than end of file"));
- cFYI(1, ("Size %ld and blocks %llu",
- (unsigned long) inode->i_size,
- (unsigned long long)inode->i_blocks));
-
- cifs_set_ops(inode, is_dfs_referral);
-cgiiu_exit:
return rc;
}
-static int decode_sfu_inode(struct inode *inode, __u64 size,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, int xid)
+static int
+cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
+ struct cifs_sb_info *cifs_sb, int xid)
{
int rc;
int oplock = 0;
@@ -321,10 +284,15 @@ static int decode_sfu_inode(struct inode *inode, __u64 size,
pbuf = buf;
- if (size == 0) {
- inode->i_mode |= S_IFIFO;
+ fattr->cf_mode &= ~S_IFMT;
+
+ if (fattr->cf_eof == 0) {
+ fattr->cf_mode |= S_IFIFO;
+ fattr->cf_dtype = DT_FIFO;
return 0;
- } else if (size < 8) {
+ } else if (fattr->cf_eof < 8) {
+ fattr->cf_mode |= S_IFREG;
+ fattr->cf_dtype = DT_REG;
return -EINVAL; /* EOPNOTSUPP? */
}
@@ -336,42 +304,46 @@ static int decode_sfu_inode(struct inode *inode, __u64 size,
if (rc == 0) {
int buf_type = CIFS_NO_BUFFER;
/* Read header */
- rc = CIFSSMBRead(xid, pTcon,
- netfid,
+ rc = CIFSSMBRead(xid, pTcon, netfid,
24 /* length */, 0 /* offset */,
&bytes_read, &pbuf, &buf_type);
if ((rc == 0) && (bytes_read >= 8)) {
if (memcmp("IntxBLK", pbuf, 8) == 0) {
cFYI(1, ("Block device"));
- inode->i_mode |= S_IFBLK;
+ fattr->cf_mode |= S_IFBLK;
+ fattr->cf_dtype = DT_BLK;
if (bytes_read == 24) {
/* we have enough to decode dev num */
__u64 mjr; /* major */
__u64 mnr; /* minor */
mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
- inode->i_rdev = MKDEV(mjr, mnr);
+ fattr->cf_rdev = MKDEV(mjr, mnr);
}
} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
cFYI(1, ("Char device"));
- inode->i_mode |= S_IFCHR;
+ fattr->cf_mode |= S_IFCHR;
+ fattr->cf_dtype = DT_CHR;
if (bytes_read == 24) {
/* we have enough to decode dev num */
__u64 mjr; /* major */
__u64 mnr; /* minor */
mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
- inode->i_rdev = MKDEV(mjr, mnr);
+ fattr->cf_rdev = MKDEV(mjr, mnr);
}
} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
cFYI(1, ("Symlink"));
- inode->i_mode |= S_IFLNK;
+ fattr->cf_mode |= S_IFLNK;
+ fattr->cf_dtype = DT_LNK;
} else {
- inode->i_mode |= S_IFREG; /* file? */
+ fattr->cf_mode |= S_IFREG; /* file? */
+ fattr->cf_dtype = DT_REG;
rc = -EOPNOTSUPP;
}
} else {
- inode->i_mode |= S_IFREG; /* then it is a file */
+ fattr->cf_mode |= S_IFREG; /* then it is a file */
+ fattr->cf_dtype = DT_REG;
rc = -EOPNOTSUPP; /* or some unknown SFU type */
}
CIFSSMBClose(xid, pTcon, netfid);
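For reference, the on-wire layout cifs_sfu_type() decodes above, as implied by the reads and memcmp() calls (a sketch, not a spec):

	/*
	 * offset  0: 8-byte magic: "IntxBLK" or "IntxCHR" (symlinks
	 *            compare only the 7 bytes of "IntxLNK")
	 * offset  8: __le64 major \ block/char devices only, and only
	 * offset 16: __le64 minor / when the 24-byte read succeeded
	 */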
@@ -381,9 +353,13 @@ static int decode_sfu_inode(struct inode *inode, __u64 size,
#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID) /* SETFILEBITS valid bits */
-static int get_sfu_mode(struct inode *inode,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, int xid)
+/*
+ * Fetch mode bits as provided by SFU.
+ *
+ * FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type?
+ */
+static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
+ struct cifs_sb_info *cifs_sb, int xid)
{
#ifdef CONFIG_CIFS_XATTR
ssize_t rc;
@@ -391,68 +367,80 @@ static int get_sfu_mode(struct inode *inode,
__u32 mode;
rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS",
- ea_value, 4 /* size of buf */, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ ea_value, 4 /* size of buf */, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc < 0)
return (int)rc;
else if (rc > 3) {
mode = le32_to_cpu(*((__le32 *)ea_value));
- inode->i_mode &= ~SFBITS_MASK;
- cFYI(1, ("special bits 0%o org mode 0%o", mode, inode->i_mode));
- inode->i_mode = (mode & SFBITS_MASK) | inode->i_mode;
+ fattr->cf_mode &= ~SFBITS_MASK;
+ cFYI(1, ("special bits 0%o org mode 0%o", mode,
+ fattr->cf_mode));
+ fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
cFYI(1, ("special mode bits 0%o", mode));
- return 0;
- } else {
- return 0;
}
+
+ return 0;
#else
return -EOPNOTSUPP;
#endif
}
-/*
- * Needed to setup inode data for the directory which is the
- * junction to the new submount (ie to setup the fake directory
- * which represents a DFS referral)
- */
-static void fill_fake_finddata(FILE_ALL_INFO *pfnd_dat,
- struct super_block *sb)
+/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
+static void
+cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
+ struct cifs_sb_info *cifs_sb, bool adjust_tz)
{
- memset(pfnd_dat, 0, sizeof(FILE_ALL_INFO));
-
-/* __le64 pfnd_dat->AllocationSize = cpu_to_le64(0);
- __le64 pfnd_dat->EndOfFile = cpu_to_le64(0);
- __u8 pfnd_dat->DeletePending = 0;
- __u8 pfnd_data->Directory = 0;
- __le32 pfnd_dat->EASize = 0;
- __u64 pfnd_dat->IndexNumber = 0;
- __u64 pfnd_dat->IndexNumber1 = 0; */
- pfnd_dat->CreationTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->LastAccessTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->LastWriteTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->ChangeTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->Attributes = cpu_to_le32(ATTR_DIRECTORY);
- pfnd_dat->NumberOfLinks = cpu_to_le32(2);
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
+ if (info->DeletePending)
+ fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING;
+
+ if (info->LastAccessTime)
+ fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+ else
+ fattr->cf_atime = CURRENT_TIME;
+
+ fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
+ fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+
+ if (adjust_tz) {
+ fattr->cf_ctime.tv_sec += cifs_sb->tcon->ses->server->timeAdj;
+ fattr->cf_mtime.tv_sec += cifs_sb->tcon->ses->server->timeAdj;
+ }
+
+ fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+ fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+
+ if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+ fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
+ fattr->cf_dtype = DT_DIR;
+ } else {
+ fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
+ fattr->cf_dtype = DT_REG;
+
+ /* clear write bits if ATTR_READONLY is set */
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~(S_IWUGO);
+ }
+
+ fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+
+ fattr->cf_uid = cifs_sb->mnt_uid;
+ fattr->cf_gid = cifs_sb->mnt_gid;
}
int cifs_get_inode_info(struct inode **pinode,
const unsigned char *full_path, FILE_ALL_INFO *pfindData,
struct super_block *sb, int xid, const __u16 *pfid)
{
- int rc = 0;
- __u32 attr;
- struct cifsInodeInfo *cifsInfo;
+ int rc = 0, tmprc;
struct cifsTconInfo *pTcon;
- struct inode *inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
char *buf = NULL;
bool adjustTZ = false;
- bool is_dfs_referral = false;
- umode_t default_mode;
+ struct cifs_fattr fattr;
pTcon = cifs_sb->tcon;
cFYI(1, ("Getting info on %s", full_path));
@@ -487,163 +475,85 @@ int cifs_get_inode_info(struct inode **pinode,
adjustTZ = true;
}
}
- /* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */
- if (rc == -EREMOTE) {
- is_dfs_referral = true;
- fill_fake_finddata(pfindData, sb);
+
+ if (!rc) {
+ cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *) pfindData,
+ cifs_sb, adjustTZ);
+ } else if (rc == -EREMOTE) {
+ cifs_create_dfs_fattr(&fattr, sb);
rc = 0;
- } else if (rc)
+ } else {
goto cgii_exit;
+ }
- attr = le32_to_cpu(pfindData->Attributes);
-
- /* get new inode */
+ /*
+ * If an inode wasn't passed in, then get the inode number
+ *
+ * Is an i_ino of zero legal? Can we use that to check if the server
+ * supports returning inode numbers? Are there other sanity checks we
+ * can use to ensure that the server is really filling in that field?
+ *
+ * We can not use the IndexNumber field by default from Windows or
+ * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
+ * CIFS spec claims that this value is unique within the scope of a
+ * share, and the windows docs hint that it's actually unique
+ * per-machine.
+ *
+ * There may be higher info levels that work but are there Windows
+ * server or network appliances for which IndexNumber field is not
+ * guaranteed unique?
+ */
if (*pinode == NULL) {
- __u64 inode_num;
- __u64 *pinum = &inode_num;
-
- /* Is an i_ino of zero legal? Can we use that to check
- if the server supports returning inode numbers? Are
- there other sanity checks we can use to ensure that
- the server is really filling in that field? */
-
- /* We can not use the IndexNumber field by default from
- Windows or Samba (in ALL_INFO buf) but we can request
- it explicitly. It may not be unique presumably if
- the server has multiple devices mounted under one share */
-
- /* There may be higher info levels that work but are
- there Windows server or network appliances for which
- IndexNumber field is not guaranteed unique? */
-
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
int rc1 = 0;
rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
- full_path, pinum,
+ full_path, &fattr.cf_uniqueid,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc1) {
cFYI(1, ("GetSrvInodeNum rc %d", rc1));
- pinum = NULL;
- /* BB EOPNOSUPP disable SERVER_INUM? */
+ fattr.cf_uniqueid = iunique(sb, ROOT_I);
+ /* disable serverino if call not supported */
+ if (rc1 == -EINVAL)
+ cifs_sb->mnt_cifs_flags &=
+ ~CIFS_MOUNT_SERVER_INUM;
}
} else {
- pinum = NULL;
- }
-
- *pinode = cifs_new_inode(sb, pinum);
- if (*pinode == NULL) {
- rc = -ENOMEM;
- goto cgii_exit;
+ fattr.cf_uniqueid = iunique(sb, ROOT_I);
}
- }
- inode = *pinode;
- cifsInfo = CIFS_I(inode);
- cifsInfo->cifsAttrs = attr;
- cifsInfo->delete_pending = pfindData->DeletePending ? true : false;
- cFYI(1, ("Old time %ld", cifsInfo->time));
- cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld", cifsInfo->time));
-
- /* blksize needs to be multiple of two. So safer to default to
- blksize and blkbits set in superblock so 2**blkbits and blksize
- will match rather than setting to:
- (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
-
- /* Linux can not store file creation time so ignore it */
- if (pfindData->LastAccessTime)
- inode->i_atime = cifs_NTtimeToUnix(pfindData->LastAccessTime);
- else /* do not need to use current_fs_time - time not stored */
- inode->i_atime = CURRENT_TIME;
- inode->i_mtime = cifs_NTtimeToUnix(pfindData->LastWriteTime);
- inode->i_ctime = cifs_NTtimeToUnix(pfindData->ChangeTime);
- cFYI(DBG2, ("Attributes came in as 0x%x", attr));
- if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
- inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
- inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
- }
-
- /* get default inode mode */
- if (attr & ATTR_DIRECTORY)
- default_mode = cifs_sb->mnt_dir_mode;
- else
- default_mode = cifs_sb->mnt_file_mode;
-
- /* set permission bits */
- if (atomic_read(&cifsInfo->inUse) == 0 ||
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
- inode->i_mode = default_mode;
- else {
- /* just reenable write bits if !ATTR_READONLY */
- if ((inode->i_mode & S_IWUGO) == 0 &&
- (attr & ATTR_READONLY) == 0)
- inode->i_mode |= (S_IWUGO & default_mode);
-
- inode->i_mode &= ~S_IFMT;
- }
- /* clear write bits if ATTR_READONLY is set */
- if (attr & ATTR_READONLY)
- inode->i_mode &= ~S_IWUGO;
-
- /* set inode type */
- if ((attr & ATTR_SYSTEM) &&
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) {
- /* no need to fix endianness on 0 */
- if (pfindData->EndOfFile == 0)
- inode->i_mode |= S_IFIFO;
- else if (decode_sfu_inode(inode,
- le64_to_cpu(pfindData->EndOfFile),
- full_path, cifs_sb, xid))
- cFYI(1, ("unknown SFU file type\n"));
} else {
- if (attr & ATTR_DIRECTORY)
- inode->i_mode |= S_IFDIR;
- else
- inode->i_mode |= S_IFREG;
+ fattr.cf_uniqueid = CIFS_I(*pinode)->uniqueid;
}
- cifsInfo->server_eof = le64_to_cpu(pfindData->EndOfFile);
- spin_lock(&inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, cifsInfo->server_eof)) {
- /* can not safely shrink the file size here if the
- client is writing to it due to potential races */
- i_size_write(inode, cifsInfo->server_eof);
-
- /* 512 bytes (2**9) is the fake blocksize that must be
- used for this calculation */
- inode->i_blocks = (512 - 1 + le64_to_cpu(
- pfindData->AllocationSize)) >> 9;
+ /* query for SFU type info if supported and needed */
+ if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
+ if (tmprc)
+ cFYI(1, ("cifs_sfu_type failed: %d", tmprc));
}
- spin_unlock(&inode->i_lock);
- inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
-
- /* BB fill in uid and gid here? with help from winbind?
- or retrieve from NTFS stream extended attribute */
#ifdef CONFIG_CIFS_EXPERIMENTAL
/* fill in 0777 bits from ACL */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
cFYI(1, ("Getting mode bits from ACL"));
- acl_to_uid_mode(cifs_sb, inode, full_path, pfid);
+ cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid);
}
#endif
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
- /* fill in remaining high mode bits e.g. SUID, VTX */
- get_sfu_mode(inode, full_path, cifs_sb, xid);
- } else if (atomic_read(&cifsInfo->inUse) == 0) {
- inode->i_uid = cifs_sb->mnt_uid;
- inode->i_gid = cifs_sb->mnt_gid;
- /* set so we do not keep refreshing these fields with
- bad data after user has changed them in memory */
- atomic_set(&cifsInfo->inUse, 1);
- }
-
- cifs_set_ops(inode, is_dfs_referral);
-
+ /* fill in remaining high mode bits e.g. SUID, VTX */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);
+ if (!*pinode) {
+ *pinode = cifs_iget(sb, &fattr);
+ if (!*pinode)
+ rc = -ENOMEM;
+ } else {
+ cifs_fattr_to_inode(*pinode, &fattr);
+ }
cgii_exit:
kfree(buf);
@@ -695,33 +605,78 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
return full_path;
}
+static int
+cifs_find_inode(struct inode *inode, void *opaque)
+{
+ struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
+
+ if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
+ return 0;
+
+ return 1;
+}
+
+static int
+cifs_init_inode(struct inode *inode, void *opaque)
+{
+ struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
+
+ CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
+ return 0;
+}
+
+/* Given fattrs, get a corresponding inode */
+struct inode *
+cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
+{
+ unsigned long hash;
+ struct inode *inode;
+
+ cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
+
+ /* hash down to 32-bits on 32-bit arch */
+ hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
+
+ inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
+
+ /* we have fattrs in hand, update the inode */
+ if (inode) {
+ cifs_fattr_to_inode(inode, fattr);
+ if (sb->s_flags & MS_NOATIME)
+ inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ if (inode->i_state & I_NEW) {
+ inode->i_ino = hash;
+ unlock_new_inode(inode);
+ }
+ }
+
+ return inode;
+}
+
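The iget5_locked() triple above is the standard pattern for filesystems whose inode identifiers do not fit in ino_t: the 32-bit hash only selects a bucket, while the test and set callbacks carry the real identity. Restating the caller side with that in mind:

	inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode,
			     fattr);
	/* I_NEW: cifs_init_inode() just stamped a fresh inode with the
	 * 64-bit uniqueid; publish it. Otherwise this is a cache hit
	 * that cifs_find_inode() matched on the full uniqueid, since
	 * several uniqueids can collide in the 32-bit hash. */
	if (inode && (inode->i_state & I_NEW)) {
		inode->i_ino = hash;
		unlock_new_inode(inode);
	}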
/* gets root inode */
struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
{
int xid;
struct cifs_sb_info *cifs_sb;
- struct inode *inode;
+ struct inode *inode = NULL;
long rc;
char *full_path;
- inode = iget_locked(sb, ino);
- if (!inode)
- return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
-
- cifs_sb = CIFS_SB(inode->i_sb);
+ cifs_sb = CIFS_SB(sb);
full_path = cifs_build_path_to_root(cifs_sb);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
xid = GetXid();
if (cifs_sb->tcon->unix_ext)
- rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
- xid);
+ rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
else
- rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb,
+ rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
if (rc && cifs_sb->tcon->ipc) {
cFYI(1, ("ipc connection - fake read inode"));
inode->i_mode |= S_IFDIR;
@@ -737,7 +692,6 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
return ERR_PTR(rc);
}
- unlock_new_inode(inode);
kfree(full_path);
/* can not call macro FreeXid here since in a void func
@@ -988,8 +942,9 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
* sb->s_vfs_rename_mutex here */
full_path = build_path_from_dentry(dentry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -1062,44 +1017,6 @@ out_reval:
return rc;
}
-void posix_fill_in_inode(struct inode *tmp_inode,
- FILE_UNIX_BASIC_INFO *pData, int isNewInode)
-{
- struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
- loff_t local_size;
- struct timespec local_mtime;
-
- cifsInfo->time = jiffies;
- atomic_inc(&cifsInfo->inUse);
-
- /* save mtime and size */
- local_mtime = tmp_inode->i_mtime;
- local_size = tmp_inode->i_size;
-
- cifs_unix_info_to_inode(tmp_inode, pData, 1);
- cifs_set_ops(tmp_inode, false);
-
- if (!S_ISREG(tmp_inode->i_mode))
- return;
-
- /*
- * No sense invalidating pages for new inode
- * since we we have not started caching
- * readahead file data yet.
- */
- if (isNewInode)
- return;
-
- if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
- (local_size == tmp_inode->i_size)) {
- cFYI(1, ("inode exists but unchanged"));
- } else {
- /* file may have changed on server */
- cFYI(1, ("invalidate inode, readdir detected change"));
- invalidate_remote_inode(tmp_inode);
- }
-}
-
int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
{
int rc = 0, tmprc;
@@ -1108,6 +1025,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
struct cifsTconInfo *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
+ struct cifs_fattr fattr;
cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode));
@@ -1118,8 +1036,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if ((pTcon->ses->capabilities & CAP_UNIX) &&
@@ -1146,7 +1065,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
cFYI(1, ("posix mkdir returned 0x%x", rc));
d_drop(direntry);
} else {
- __u64 unique_id;
if (pInfo->Type == cpu_to_le32(-1)) {
/* no return info, go query for it */
kfree(pInfo);
@@ -1160,20 +1078,15 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
else
direntry->d_op = &cifs_dentry_ops;
- unique_id = le64_to_cpu(pInfo->UniqueId);
- newinode = cifs_new_inode(inode->i_sb, &unique_id);
- if (newinode == NULL) {
+ cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
+ newinode = cifs_iget(inode->i_sb, &fattr);
+ if (!newinode) {
kfree(pInfo);
goto mkdir_get_info;
}
- newinode->i_nlink = 2;
d_instantiate(direntry, newinode);
- /* we already checked in POSIXCreate whether
- frame was long enough */
- posix_fill_in_inode(direntry->d_inode,
- pInfo, 1 /* NewInode */);
#ifdef CONFIG_CIFS_DEBUG2
cFYI(1, ("instantiated dentry %p %s to inode %p",
direntry, direntry->d_name.name, newinode));
@@ -1236,10 +1149,10 @@ mkdir_get_info:
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
- CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
+ CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
(mode & S_IWUGO) == 0) {
@@ -1303,8 +1216,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
@@ -1508,8 +1422,9 @@ int cifs_revalidate(struct dentry *direntry)
since that would deadlock */
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
"jiffies %ld", full_path, direntry->d_inode,
@@ -1618,6 +1533,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
if (!err) {
generic_fillattr(dentry->d_inode, stat);
stat->blksize = CIFS_MAX_MSGSIZE;
+ stat->ino = CIFS_I(dentry->d_inode)->uniqueid;
}
return err;
}
@@ -1782,6 +1698,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsTconInfo *pTcon = cifs_sb->tcon;
struct cifs_unix_set_info_args *args = NULL;
+ struct cifsFileInfo *open_file;
cFYI(1, ("setattr_unix on file %s attrs->ia_valid=0x%x",
direntry->d_name.name, attrs->ia_valid));
@@ -1868,10 +1785,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
args->ctime = NO_CHANGE_64;
args->device = 0;
- rc = CIFSSMBUnixSetInfo(xid, pTcon, full_path, args,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
+ open_file = find_writable_file(cifsInode);
+ if (open_file) {
+ u16 nfid = open_file->netfid;
+ u32 npid = open_file->pid;
+ rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
+ atomic_dec(&open_file->wrtPending);
+ } else {
+ rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ }
if (!rc)
rc = inode_setattr(inode, attrs);
@@ -1911,8 +1836,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/*
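The setattr hunk above prefers the handle-based call whenever a writable handle exists. Two details worth spelling out: the opener's pid rides in the SMB header, presumably because servers tie handle state (locks, share modes) to the pid that opened it, and find_writable_file() takes a reference that must be dropped via wrtPending. The pattern, annotated:

	open_file = find_writable_file(cifsInode);
	if (open_file) {
		/* reuse the open handle, tagged with its owner's pid */
		rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args,
					    open_file->netfid,
					    open_file->pid);
		atomic_dec(&open_file->wrtPending);	/* drop the ref */
	} else {
		/* no handle available: fall back to the path-based call */
		rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
					    cifs_sb->local_nls,
					    cifs_sb->mnt_cifs_flags &
					    CIFS_MOUNT_MAP_SPECIAL_CHR);
	}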
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cd83c53..fc1e048 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -172,8 +172,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("Full path: %s", full_path));
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 32d6baa..bd6d689 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -133,10 +133,12 @@ static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
{0, 0}
};
-/* Convert string containing dotted ip address to binary form */
-/* returns 0 if invalid address */
-
-int
+/*
+ * Convert a string containing text IPv4 or IPv6 address to binary form.
+ *
+ * Returns 0 on failure.
+ */
+static int
cifs_inet_pton(const int address_family, const char *cp, void *dst)
{
int ret = 0;
@@ -153,6 +155,52 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst)
return ret;
}
+/*
+ * Try to convert a string to an IPv4 address and then attempt to convert
+ * it to an IPv6 address if that fails. Set the family field if either
+ * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to
+ * treat the part following it as a numeric sin6_scope_id.
+ *
+ * Returns 0 on failure.
+ */
+int
+cifs_convert_address(char *src, void *dst)
+{
+ int rc;
+ char *pct, *endp;
+ struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
+ struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
+
+ /* IPv4 address */
+ if (cifs_inet_pton(AF_INET, src, &s4->sin_addr.s_addr)) {
+ s4->sin_family = AF_INET;
+ return 1;
+ }
+
+ /* temporarily terminate string */
+ pct = strchr(src, '%');
+ if (pct)
+ *pct = '\0';
+
+ rc = cifs_inet_pton(AF_INET6, src, &s6->sin6_addr.s6_addr);
+
+ /* repair temp termination (if any) and make pct point to scopeid */
+ if (pct)
+ *pct++ = '%';
+
+ if (!rc)
+ return rc;
+
+ s6->sin6_family = AF_INET6;
+ if (pct) {
+ s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
+ if (!*pct || *endp)
+ return 0;
+ }
+
+ return rc;
+}
+
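A hedged usage sketch for the new helper (the address is a placeholder); note the buffer must be writable because of the temporary NUL termination above:

	char addr[] = "fe80::1%2";	/* array, not a string literal */
	struct sockaddr_storage ss;

	if (cifs_convert_address(addr, &ss)) {
		struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&ss;
		/* ss.ss_family == AF_INET6, s6->sin6_scope_id == 2 */
	}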
/*****************************************************************************
convert a NT status code to a dos class/code
*****************************************************************************/
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 86d0055..f823a4a 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -63,374 +63,123 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
}
#endif /* DEBUG2 */
-/* Returns 1 if new inode created, 2 if both dentry and inode were */
-/* Might check in the future if inode number changed so we can rehash inode */
-static int
-construct_dentry(struct qstr *qstring, struct file *file,
- struct inode **ptmp_inode, struct dentry **pnew_dentry,
- __u64 *inum)
+/*
+ * Find the dentry that matches "name". If there isn't one, create one. If it's
+ * a negative dentry or the uniqueid changed, then drop it and recreate it.
+ */
+static struct dentry *
+cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+ struct cifs_fattr *fattr)
{
- struct dentry *tmp_dentry = NULL;
- struct super_block *sb = file->f_path.dentry->d_sb;
- int rc = 0;
+ struct dentry *dentry, *alias;
+ struct inode *inode;
+ struct super_block *sb = parent->d_inode->i_sb;
+
+ cFYI(1, ("For %s", name->name));
+
+ dentry = d_lookup(parent, name);
+ if (dentry) {
+ /* FIXME: check for inode number changes? */
+ if (dentry->d_inode != NULL)
+ return dentry;
+ d_drop(dentry);
+ dput(dentry);
+ }
- cFYI(1, ("For %s", qstring->name));
-
- qstring->hash = full_name_hash(qstring->name, qstring->len);
- tmp_dentry = d_lookup(file->f_path.dentry, qstring);
- if (tmp_dentry) {
- /* BB: overwrite old name? i.e. tmp_dentry->d_name and
- * tmp_dentry->d_name.len??
- */
- cFYI(0, ("existing dentry with inode 0x%p",
- tmp_dentry->d_inode));
- *ptmp_inode = tmp_dentry->d_inode;
- if (*ptmp_inode == NULL) {
- *ptmp_inode = cifs_new_inode(sb, inum);
- if (*ptmp_inode == NULL)
- return rc;
- rc = 1;
- }
- } else {
- tmp_dentry = d_alloc(file->f_path.dentry, qstring);
- if (tmp_dentry == NULL) {
- cERROR(1, ("Failed allocating dentry"));
- *ptmp_inode = NULL;
- return rc;
- }
+ dentry = d_alloc(parent, name);
+ if (dentry == NULL)
+ return NULL;
- if (CIFS_SB(sb)->tcon->nocase)
- tmp_dentry->d_op = &cifs_ci_dentry_ops;
- else
- tmp_dentry->d_op = &cifs_dentry_ops;
+ inode = cifs_iget(sb, fattr);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
- *ptmp_inode = cifs_new_inode(sb, inum);
- if (*ptmp_inode == NULL)
- return rc;
- rc = 2;
+ if (CIFS_SB(sb)->tcon->nocase)
+ dentry->d_op = &cifs_ci_dentry_ops;
+ else
+ dentry->d_op = &cifs_dentry_ops;
+
+ alias = d_materialise_unique(dentry, inode);
+ if (alias != NULL) {
+ dput(dentry);
+ if (IS_ERR(alias))
+ return NULL;
+ dentry = alias;
}
- tmp_dentry->d_time = jiffies;
- *pnew_dentry = tmp_dentry;
- return rc;
+ return dentry;
}
-static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
- char *buf, unsigned int *pobject_type, int isNewInode)
+static void
+cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
{
- loff_t local_size;
- struct timespec local_mtime;
-
- struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
- __u32 attr;
- __u64 allocation_size;
- __u64 end_of_file;
- umode_t default_mode;
-
- /* save mtime and size */
- local_mtime = tmp_inode->i_mtime;
- local_size = tmp_inode->i_size;
-
- if (new_buf_type) {
- FILE_DIRECTORY_INFO *pfindData = (FILE_DIRECTORY_INFO *)buf;
-
- attr = le32_to_cpu(pfindData->ExtFileAttributes);
- allocation_size = le64_to_cpu(pfindData->AllocationSize);
- end_of_file = le64_to_cpu(pfindData->EndOfFile);
- tmp_inode->i_atime =
- cifs_NTtimeToUnix(pfindData->LastAccessTime);
- tmp_inode->i_mtime =
- cifs_NTtimeToUnix(pfindData->LastWriteTime);
- tmp_inode->i_ctime =
- cifs_NTtimeToUnix(pfindData->ChangeTime);
- } else { /* legacy, OS2 and DOS style */
- int offset = cifs_sb->tcon->ses->server->timeAdj;
- FIND_FILE_STANDARD_INFO *pfindData =
- (FIND_FILE_STANDARD_INFO *)buf;
-
- tmp_inode->i_mtime = cnvrtDosUnixTm(pfindData->LastWriteDate,
- pfindData->LastWriteTime,
- offset);
- tmp_inode->i_atime = cnvrtDosUnixTm(pfindData->LastAccessDate,
- pfindData->LastAccessTime,
- offset);
- tmp_inode->i_ctime = cnvrtDosUnixTm(pfindData->LastWriteDate,
- pfindData->LastWriteTime,
- offset);
- attr = le16_to_cpu(pfindData->Attributes);
- allocation_size = le32_to_cpu(pfindData->AllocationSize);
- end_of_file = le32_to_cpu(pfindData->DataSize);
- }
+ fattr->cf_uid = cifs_sb->mnt_uid;
+ fattr->cf_gid = cifs_sb->mnt_gid;
- /* Linux can not store file creation time unfortunately so ignore it */
-
- cifsInfo->cifsAttrs = attr;
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- /* get more accurate mode via ACL - so force inode refresh */
- cifsInfo->time = 0;
- } else
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
- cifsInfo->time = jiffies;
-
- /* treat dos attribute of read-only as read-only mode bit e.g. 555? */
- /* 2767 perms - indicate mandatory locking */
- /* BB fill in uid and gid here? with help from winbind?
- or retrieve from NTFS stream extended attribute */
- if (atomic_read(&cifsInfo->inUse) == 0) {
- tmp_inode->i_uid = cifs_sb->mnt_uid;
- tmp_inode->i_gid = cifs_sb->mnt_gid;
- }
-
- if (attr & ATTR_DIRECTORY)
- default_mode = cifs_sb->mnt_dir_mode;
- else
- default_mode = cifs_sb->mnt_file_mode;
-
- /* set initial permissions */
- if ((atomic_read(&cifsInfo->inUse) == 0) ||
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
- tmp_inode->i_mode = default_mode;
- else {
- /* just reenable write bits if !ATTR_READONLY */
- if ((tmp_inode->i_mode & S_IWUGO) == 0 &&
- (attr & ATTR_READONLY) == 0)
- tmp_inode->i_mode |= (S_IWUGO & default_mode);
-
- tmp_inode->i_mode &= ~S_IFMT;
+ if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+ fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
+ fattr->cf_dtype = DT_DIR;
+ } else {
+ fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
+ fattr->cf_dtype = DT_REG;
}
- /* clear write bits if ATTR_READONLY is set */
- if (attr & ATTR_READONLY)
- tmp_inode->i_mode &= ~S_IWUGO;
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~S_IWUGO;
- /* set inode type */
- if ((attr & ATTR_SYSTEM) &&
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) {
- if (end_of_file == 0) {
- tmp_inode->i_mode |= S_IFIFO;
- *pobject_type = DT_FIFO;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL &&
+ fattr->cf_cifsattrs & ATTR_SYSTEM) {
+ if (fattr->cf_eof == 0) {
+ fattr->cf_mode &= ~S_IFMT;
+ fattr->cf_mode |= S_IFIFO;
+ fattr->cf_dtype = DT_FIFO;
} else {
/*
- * trying to get the type can be slow, so just call
- * this a regular file for now, and mark for reval
+ * trying to get the type and mode via SFU can be slow,
+ * so just treat those as regular files for now, and
+ * mark them for revalidation
*/
- tmp_inode->i_mode |= S_IFREG;
- *pobject_type = DT_REG;
- cifsInfo->time = 0;
- }
- } else {
- if (attr & ATTR_DIRECTORY) {
- tmp_inode->i_mode |= S_IFDIR;
- *pobject_type = DT_DIR;
- } else {
- tmp_inode->i_mode |= S_IFREG;
- *pobject_type = DT_REG;
+ fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
}
}
+}
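
cifs_fill_common_info() reduces the DOS attribute bits to a POSIX mode in two steps: pick a base mode from the mount options depending on ATTR_DIRECTORY, then strip the write bits when ATTR_READONLY is set. A self-contained userspace illustration of that arithmetic; the 0755/0644 defaults stand in for the mount's dir_mode/file_mode options:

/*
 * Userspace sketch of the attribute-to-mode reduction done by
 * cifs_fill_common_info() above.
 */
#include <stdio.h>
#include <sys/stat.h>

#define ATTR_READONLY  0x0001	/* DOS FILE_ATTRIBUTE_READONLY */
#define ATTR_DIRECTORY 0x0010	/* DOS FILE_ATTRIBUTE_DIRECTORY */

static mode_t attrs_to_mode(unsigned int attrs, mode_t dir_mode,
			    mode_t file_mode)
{
	mode_t mode;

	if (attrs & ATTR_DIRECTORY)
		mode = S_IFDIR | dir_mode;
	else
		mode = S_IFREG | file_mode;

	if (attrs & ATTR_READONLY)	/* read-only wins: drop write bits */
		mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);

	return mode;
}

int main(void)
{
	/* read-only regular file: 0644 collapses to 0444 */
	printf("%o\n", (unsigned)attrs_to_mode(ATTR_READONLY, 0755, 0644));
	/* directory: keeps 0755, plus S_IFDIR */
	printf("%o\n", (unsigned)attrs_to_mode(ATTR_DIRECTORY, 0755, 0644));
	return 0;
}
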
- /* can not fill in nlink here as in qpathinfo version and Unx search */
- if (atomic_read(&cifsInfo->inUse) == 0)
- atomic_set(&cifsInfo->inUse, 1);
-
- cifsInfo->server_eof = end_of_file;
- spin_lock(&tmp_inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
- client is writing to it due to potential races */
- i_size_write(tmp_inode, end_of_file);
-
- /* 512 bytes (2**9) is the fake blocksize that must be used */
- /* for this calculation, even though the reported blocksize is larger */
- tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9;
- }
- spin_unlock(&tmp_inode->i_lock);
-
- if (allocation_size < end_of_file)
- cFYI(1, ("May be sparse file, allocation less than file size"));
- cFYI(1, ("File Size %ld and blocks %llu",
- (unsigned long)tmp_inode->i_size,
- (unsigned long long)tmp_inode->i_blocks));
- if (S_ISREG(tmp_inode->i_mode)) {
- cFYI(1, ("File inode"));
- tmp_inode->i_op = &cifs_file_inode_ops;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_direct_ops;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_ops;
-
- if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
- (cifs_sb->tcon->ses->server->maxBuf <
- PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
- else
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
-
- if (isNewInode)
- return; /* No sense invalidating pages for new inode
- since have not started caching readahead file
- data yet */
-
- if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
- (local_size == tmp_inode->i_size)) {
- cFYI(1, ("inode exists but unchanged"));
- } else {
- /* file may have changed on server */
- cFYI(1, ("invalidate inode, readdir detected change"));
- invalidate_remote_inode(tmp_inode);
- }
- } else if (S_ISDIR(tmp_inode->i_mode)) {
- cFYI(1, ("Directory inode"));
- tmp_inode->i_op = &cifs_dir_inode_ops;
- tmp_inode->i_fop = &cifs_dir_ops;
- } else if (S_ISLNK(tmp_inode->i_mode)) {
- cFYI(1, ("Symbolic Link inode"));
- tmp_inode->i_op = &cifs_symlink_inode_ops;
- } else {
- cFYI(1, ("Init special inode"));
- init_special_inode(tmp_inode, tmp_inode->i_mode,
- tmp_inode->i_rdev);
- }
+void
+cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
+ struct cifs_sb_info *cifs_sb)
+{
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
+ fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+ fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+ fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+ fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
+ fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+
+ cifs_fill_common_info(fattr, cifs_sb);
}
-static void unix_fill_in_inode(struct inode *tmp_inode,
- FILE_UNIX_INFO *pfindData, unsigned int *pobject_type, int isNewInode)
+void
+cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
+ struct cifs_sb_info *cifs_sb)
{
- loff_t local_size;
- struct timespec local_mtime;
-
- struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
-
- __u32 type = le32_to_cpu(pfindData->Type);
- __u64 num_of_bytes = le64_to_cpu(pfindData->NumOfBytes);
- __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
- cifsInfo->time = jiffies;
- atomic_inc(&cifsInfo->inUse);
-
- /* save mtime and size */
- local_mtime = tmp_inode->i_mtime;
- local_size = tmp_inode->i_size;
-
- tmp_inode->i_atime =
- cifs_NTtimeToUnix(pfindData->LastAccessTime);
- tmp_inode->i_mtime =
- cifs_NTtimeToUnix(pfindData->LastModificationTime);
- tmp_inode->i_ctime =
- cifs_NTtimeToUnix(pfindData->LastStatusChange);
-
- tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions);
- /* since we set the inode type below we need to mask off type
- to avoid strange results if bits above were corrupt */
- tmp_inode->i_mode &= ~S_IFMT;
- if (type == UNIX_FILE) {
- *pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- } else if (type == UNIX_SYMLINK) {
- *pobject_type = DT_LNK;
- tmp_inode->i_mode |= S_IFLNK;
- } else if (type == UNIX_DIR) {
- *pobject_type = DT_DIR;
- tmp_inode->i_mode |= S_IFDIR;
- } else if (type == UNIX_CHARDEV) {
- *pobject_type = DT_CHR;
- tmp_inode->i_mode |= S_IFCHR;
- tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
- le64_to_cpu(pfindData->DevMinor) & MINORMASK);
- } else if (type == UNIX_BLOCKDEV) {
- *pobject_type = DT_BLK;
- tmp_inode->i_mode |= S_IFBLK;
- tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
- le64_to_cpu(pfindData->DevMinor) & MINORMASK);
- } else if (type == UNIX_FIFO) {
- *pobject_type = DT_FIFO;
- tmp_inode->i_mode |= S_IFIFO;
- } else if (type == UNIX_SOCKET) {
- *pobject_type = DT_SOCK;
- tmp_inode->i_mode |= S_IFSOCK;
- } else {
- /* safest to just call it a file */
- *pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- cFYI(1, ("unknown inode type %d", type));
- }
+ int offset = cifs_sb->tcon->ses->server->timeAdj;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
- tmp_inode->i_uid = cifs_sb->mnt_uid;
- else
- tmp_inode->i_uid = le64_to_cpu(pfindData->Uid);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
- tmp_inode->i_gid = cifs_sb->mnt_gid;
- else
- tmp_inode->i_gid = le64_to_cpu(pfindData->Gid);
- tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks);
-
- cifsInfo->server_eof = end_of_file;
- spin_lock(&tmp_inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
- client is writing to it due to potential races */
- i_size_write(tmp_inode, end_of_file);
-
- /* 512 bytes (2**9) is the fake blocksize that must be used */
- /* for this calculation, not the real blocksize */
- tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
- }
- spin_unlock(&tmp_inode->i_lock);
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate,
+ info->LastAccessTime, offset);
+ fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate,
+ info->LastWriteTime, offset);
+ fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate,
+ info->LastWriteTime, offset);
- if (S_ISREG(tmp_inode->i_mode)) {
- cFYI(1, ("File inode"));
- tmp_inode->i_op = &cifs_file_inode_ops;
+ fattr->cf_cifsattrs = le16_to_cpu(info->Attributes);
+ fattr->cf_bytes = le32_to_cpu(info->AllocationSize);
+ fattr->cf_eof = le32_to_cpu(info->DataSize);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_direct_ops;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_ops;
-
- if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
- (cifs_sb->tcon->ses->server->maxBuf <
- PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
- else
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
-
- if (isNewInode)
- return; /* No sense invalidating pages for new inode
- since we have not started caching readahead
- file data for it yet */
-
- if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
- (local_size == tmp_inode->i_size)) {
- cFYI(1, ("inode exists but unchanged"));
- } else {
- /* file may have changed on server */
- cFYI(1, ("invalidate inode, readdir detected change"));
- invalidate_remote_inode(tmp_inode);
- }
- } else if (S_ISDIR(tmp_inode->i_mode)) {
- cFYI(1, ("Directory inode"));
- tmp_inode->i_op = &cifs_dir_inode_ops;
- tmp_inode->i_fop = &cifs_dir_ops;
- } else if (S_ISLNK(tmp_inode->i_mode)) {
- cFYI(1, ("Symbolic Link inode"));
- tmp_inode->i_op = &cifs_symlink_inode_ops;
-/* tmp_inode->i_fop = *//* do not need to set to anything */
- } else {
- cFYI(1, ("Special inode"));
- init_special_inode(tmp_inode, tmp_inode->i_mode,
- tmp_inode->i_rdev);
- }
+ cifs_fill_common_info(fattr, cifs_sb);
}
/* BB eventually need to add the following helper function to
@@ -872,7 +621,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
len = strnlen(filename, PATH_MAX);
}
- *pinum = le64_to_cpu(pFindData->UniqueId);
+ *pinum = le64_to_cpu(pFindData->basic.UniqueId);
} else if (level == SMB_FIND_FILE_DIRECTORY_INFO) {
FILE_DIRECTORY_INFO *pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
@@ -932,11 +681,12 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
int rc = 0;
struct qstr qstring;
struct cifsFileInfo *pCifsF;
- unsigned int obj_type;
- __u64 inum;
+ u64 inum;
+ ino_t ino;
+ struct super_block *sb;
struct cifs_sb_info *cifs_sb;
- struct inode *tmp_inode;
struct dentry *tmp_dentry;
+ struct cifs_fattr fattr;
/* get filename and len into qstring */
/* get dentry */
@@ -954,60 +704,53 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
if (rc != 0)
return 0;
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+ sb = file->f_path.dentry->d_sb;
+ cifs_sb = CIFS_SB(sb);
qstring.name = scratch_buf;
rc = cifs_get_name_from_search_buf(&qstring, pfindEntry,
pCifsF->srch_inf.info_level,
pCifsF->srch_inf.unicode, cifs_sb,
- max_len,
- &inum /* returned */);
+ max_len, &inum /* returned */);
if (rc)
return rc;
- /* only these two infolevels return valid inode numbers */
- if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX ||
- pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO)
- rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry,
- &inum);
- else
- rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry,
- NULL);
-
- if ((tmp_inode == NULL) || (tmp_dentry == NULL))
- return -ENOMEM;
-
- /* we pass in rc below, indicating whether it is a new inode,
- so we can figure out whether to invalidate the inode cached
- data if the file has changed */
if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
- unix_fill_in_inode(tmp_inode,
- (FILE_UNIX_INFO *)pfindEntry,
- &obj_type, rc);
+ cifs_unix_basic_to_fattr(&fattr,
+ &((FILE_UNIX_INFO *) pfindEntry)->basic,
+ cifs_sb);
else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
- fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */,
- pfindEntry, &obj_type, rc);
+ cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *)
+ pfindEntry, cifs_sb);
else
- fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
+ cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)
+ pfindEntry, cifs_sb);
- if (rc) /* new inode - needs to be tied to dentry */ {
- d_instantiate(tmp_dentry, tmp_inode);
- if (rc == 2)
- d_rehash(tmp_dentry);
- }
+ /* FIXME: make _to_fattr functions fill this out */
+ if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO)
+ fattr.cf_uniqueid = inum;
+ else
+ fattr.cf_uniqueid = iunique(sb, ROOT_I);
+ ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
+ tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr);
rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
- tmp_inode->i_ino, obj_type);
+ ino, fattr.cf_dtype);
+
+ /*
+	 * we cannot return filldir errors to the caller since they are
+	 * "normal" when the stat blocksize is too small - we return a
+	 * remapped error instead
+ *
+ * FIXME: This looks bogus. filldir returns -EOVERFLOW in the above
+ * case already. Why should we be clobbering other errors from it?
+ */
if (rc) {
cFYI(1, ("filldir rc = %d", rc));
- /* we can not return filldir errors to the caller
- since they are "normal" when the stat blocksize
- is too small - we return remapped error instead */
rc = -EOVERFLOW;
}
-
dput(tmp_dentry);
return rc;
}
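
With the inode-filling helpers gone, cifs_filldir() reduces to a pipeline: convert the on-the-wire entry into a cifs_fattr with the converter matching the search info level, pick an inode number (server-supplied only for SMB_FIND_FILE_ID_FULL_DIR_INFO, synthesized via iunique() otherwise), materialise a dentry, and call filldir. The converter dispatch could equally be written as a switch; a sketch of that alternative shape, not the code as merged:

/* equivalent dispatch written as a switch; same converters as above */
switch (pCifsF->srch_inf.info_level) {
case SMB_FIND_FILE_UNIX:
	cifs_unix_basic_to_fattr(&fattr,
			&((FILE_UNIX_INFO *) pfindEntry)->basic, cifs_sb);
	break;
case SMB_FIND_FILE_INFO_STANDARD:
	cifs_std_info_to_fattr(&fattr,
			(FIND_FILE_STANDARD_INFO *) pfindEntry, cifs_sb);
	break;
default:
	cifs_dir_info_to_fattr(&fattr,
			(FILE_DIRECTORY_INFO *) pfindEntry, cifs_sb);
	break;
}
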
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 897a052..7085a62 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -802,7 +802,7 @@ ssetup_ntlmssp_authenticate:
#endif /* CONFIG_CIFS_UPCALL */
} else {
#ifdef CONFIG_CIFS_EXPERIMENTAL
- if ((experimEnabled > 1) && (type == RawNTLMSSP)) {
+ if (type == RawNTLMSSP) {
if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
cERROR(1, ("NTLMSSP requires Unicode support"));
rc = -ENOSYS;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index e9527ee..a75afa3 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -64,8 +64,9 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if (ea_name == NULL) {
cFYI(1, ("Null xattr names not supported"));
@@ -118,8 +119,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -225,8 +227,9 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -351,8 +354,9 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
diff --git a/fs/compat.c b/fs/compat.c
index cdd51a3..94502da 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -32,7 +32,6 @@
#include <linux/smb_mount.h>
#include <linux/ncp_mount.h>
#include <linux/nfs4_mount.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/module.h>
@@ -1486,8 +1485,8 @@ int compat_do_execve(char * filename,
if (!bprm)
goto out_files;
- retval = mutex_lock_interruptible(&current->cred_guard_mutex);
- if (retval < 0)
+ retval = -ERESTARTNOINTR;
+ if (mutex_lock_interruptible(&current->cred_guard_mutex))
goto out_free;
current->in_execve = 1;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 626c748..f91fd51b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -19,6 +19,7 @@
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/smp.h>
+#include <linux/smp_lock.h>
#include <linux/ioctl.h>
#include <linux/if.h>
#include <linux/if_bridge.h>
@@ -1904,6 +1905,7 @@ COMPATIBLE_IOCTL(FIONCLEX)
COMPATIBLE_IOCTL(FIOASYNC)
COMPATIBLE_IOCTL(FIONBIO)
COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
+COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
/* 0x00 */
COMPATIBLE_IOCTL(FIBMAP)
COMPATIBLE_IOCTL(FIGETBSZ)
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 205ec95..eb507c4 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -435,7 +435,7 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
unsigned int flags, struct dlm_rsb **r_ret)
{
- struct dlm_rsb *r, *tmp;
+ struct dlm_rsb *r = NULL, *tmp;
uint32_t hash, bucket;
int error = -EINVAL;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index cdb580a..618a60f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -902,7 +902,7 @@ static void tcp_connect_to_sock(struct connection *con)
int result = -EHOSTUNREACH;
struct sockaddr_storage saddr, src_addr;
int addr_len;
- struct socket *sock;
+ struct socket *sock = NULL;
if (con->nodeid == 0) {
log_print("attempt to connect sock 0 foiled");
@@ -962,6 +962,8 @@ out_err:
if (con->sock) {
sock_release(con->sock);
con->sock = NULL;
+ } else if (sock) {
+ sock_release(sock);
}
/*
* Some errors are fatal and this list might need adjusting. For other
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 894a32d..16f682e 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -353,7 +353,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
{
struct dlm_plock_info info;
struct plock_op *op;
- int found = 0;
+ int found = 0, do_callback = 0;
if (count != sizeof(info))
return -EINVAL;
@@ -366,21 +366,24 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
spin_lock(&ops_lock);
list_for_each_entry(op, &recv_list, list) {
- if (op->info.fsid == info.fsid && op->info.number == info.number &&
+ if (op->info.fsid == info.fsid &&
+ op->info.number == info.number &&
op->info.owner == info.owner) {
+ struct plock_xop *xop = (struct plock_xop *)op;
list_del_init(&op->list);
- found = 1;
- op->done = 1;
memcpy(&op->info, &info, sizeof(info));
+ if (xop->callback)
+ do_callback = 1;
+ else
+ op->done = 1;
+ found = 1;
break;
}
}
spin_unlock(&ops_lock);
if (found) {
- struct plock_xop *xop;
- xop = (struct plock_xop *)op;
- if (xop->callback)
+ if (do_callback)
dlm_plock_callback(op);
else
wake_up(&recv_wq);
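
The dev_write() fix is an instance of a general locking pattern: inspect the shared op and record the decision while holding ops_lock, then perform the action, which may block or take other locks, only after dropping it. A userspace analogue using pthreads; all names here are illustrative, not dlm API:

#include <pthread.h>

struct op {
	void (*callback)(struct op *);
	int done;
};

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t recv_wq = PTHREAD_COND_INITIALIZER;

static void complete_op(struct op *op)
{
	int do_callback = 0;

	pthread_mutex_lock(&ops_lock);
	if (op->callback)
		do_callback = 1;	/* decide under the lock... */
	else
		op->done = 1;
	pthread_mutex_unlock(&ops_lock);

	if (do_callback)
		op->callback(op);	/* ...act outside it */
	else
		pthread_cond_broadcast(&recv_wq);
}
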
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index af737bb..259525c9 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1303,6 +1303,13 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
}
(*new_auth_tok)->session_key.encrypted_key_size =
(body_size - (ECRYPTFS_SALT_SIZE + 5));
+ if ((*new_auth_tok)->session_key.encrypted_key_size
+ > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
+ printk(KERN_WARNING "Tag 3 packet contains key larger "
+ "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
+ rc = -EINVAL;
+ goto out_free;
+ }
if (unlikely(data[(*packet_size)++] != 0x04)) {
printk(KERN_WARNING "Unknown version number [%d]\n",
data[(*packet_size) - 1]);
@@ -1449,6 +1456,12 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents,
rc = -EINVAL;
goto out;
}
+ if (unlikely((*tag_11_contents_size) > max_contents_bytes)) {
+ printk(KERN_ERR "Literal data section in tag 11 packet exceeds "
+ "expected size\n");
+ rc = -EINVAL;
+ goto out;
+ }
if (data[(*packet_size)++] != 0x62) {
printk(KERN_WARNING "Unrecognizable packet\n");
rc = -EINVAL;
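
Both keystore.c hunks add the same defensive step: a length parsed out of a wire-format packet must be validated against the destination buffer before it is used as a copy size. A minimal userspace sketch of the idea; the 8-byte header and 512-byte cap are invented for illustration, not eCryptfs constants:

#include <stdio.h>
#include <string.h>

#define MAX_KEY_BYTES 512	/* stand-in cap, not the eCryptfs constant */
#define HDR_BYTES 8		/* made-up fixed header size */

static int parse_key(const unsigned char *pkt, size_t body_size,
		     unsigned char *key_out)
{
	size_t key_size;

	if (body_size < HDR_BYTES)
		return -1;		/* truncated packet */
	key_size = body_size - HDR_BYTES;
	if (key_size > MAX_KEY_BYTES)
		return -1;		/* the new check: reject oversize */
	memcpy(key_out, pkt + HDR_BYTES, key_size);
	return (int)key_size;
}

int main(void)
{
	unsigned char pkt[1024] = { 0 };
	unsigned char key[MAX_KEY_BYTES];

	printf("%d\n", parse_key(pkt, 40, key));	/* 32: accepted */
	printf("%d\n", parse_key(pkt, 1024, key));	/* -1: rejected */
	return 0;
}
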
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 3f0e197..31d12de8 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -14,35 +14,44 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
-#include <linux/eventfd.h>
#include <linux/syscalls.h>
#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/eventfd.h>
struct eventfd_ctx {
+ struct kref kref;
wait_queue_head_t wqh;
/*
* Every time that a write(2) is performed on an eventfd, the
* value of the __u64 being written is added to "count" and a
* wakeup is performed on "wqh". A read(2) will return the "count"
* value to userspace, and will reset "count" to zero. The kernel
- * size eventfd_signal() also, adds to the "count" counter and
+	 * side eventfd_signal() also adds to the "count" counter and
* issue a wakeup.
*/
__u64 count;
unsigned int flags;
};
-/*
- * Adds "n" to the eventfd counter "count". Returns "n" in case of
- * success, or a value lower then "n" in case of coutner overflow.
- * This function is supposed to be called by the kernel in paths
- * that do not allow sleeping. In this function we allow the counter
- * to reach the ULLONG_MAX value, and we signal this as overflow
- * condition by returining a POLLERR to poll(2).
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @n: [in] Value of the counter to be added to the eventfd internal counter.
+ * The value cannot be negative.
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as an overflow condition by returning a
+ * POLLERR to poll(2).
+ *
+ * Returns @n in case of success, a non-negative number lower than @n in case
+ * of overflow, or the following error codes:
+ *
+ * -EINVAL : The value of @n is negative.
*/
-int eventfd_signal(struct file *file, int n)
+int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
- struct eventfd_ctx *ctx = file->private_data;
unsigned long flags;
if (n < 0)
@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n)
}
EXPORT_SYMBOL_GPL(eventfd_signal);
+static void eventfd_free(struct kref *kref)
+{
+ struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
+
+ kfree(ctx);
+}
+
+/**
+ * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to the eventfd context.
+ *
+ * Returns: In case of success, returns a pointer to the eventfd context.
+ */
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
+{
+ kref_get(&ctx->kref);
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_get);
+
+/**
+ * eventfd_ctx_put - Releases a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to eventfd context.
+ *
+ * The eventfd context reference must have been previously acquired either
+ * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ */
+void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+ kref_put(&ctx->kref, eventfd_free);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_put);
+
static int eventfd_release(struct inode *inode, struct file *file)
{
- kfree(file->private_data);
+ struct eventfd_ctx *ctx = file->private_data;
+
+ wake_up_poll(&ctx->wqh, POLLHUP);
+ eventfd_ctx_put(ctx);
return 0;
}
@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = {
.write = eventfd_write,
};
+/**
+ * eventfd_fget - Acquire a reference to an eventfd file descriptor.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the eventfd file structure in case of success, or the
+ * following error pointer:
+ *
+ * -EBADF : Invalid @fd file descriptor.
+ * -EINVAL : The @fd file descriptor is not an eventfd file.
+ */
struct file *eventfd_fget(int fd)
{
struct file *file;
@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd)
}
EXPORT_SYMBOL_GPL(eventfd_fget);
+/**
+ * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the internal eventfd context on success, otherwise
+ * the error pointer returned by eventfd_fget().
+ */
+struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ struct file *file;
+ struct eventfd_ctx *ctx;
+
+ file = eventfd_fget(fd);
+ if (IS_ERR(file))
+ return (struct eventfd_ctx *) file;
+ ctx = eventfd_ctx_get(file->private_data);
+ fput(file);
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
+
+/**
+ * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
+ * @file: [in] Eventfd file pointer.
+ *
+ * Returns a pointer to the internal eventfd context, otherwise the error
+ * pointer:
+ *
+ * -EINVAL : The @file pointer is not an eventfd file.
+ */
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
+{
+ if (file->f_op != &eventfd_fops)
+ return ERR_PTR(-EINVAL);
+
+ return eventfd_ctx_get(file->private_data);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
+
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
int fd;
@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
if (!ctx)
return -ENOMEM;
+ kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
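
The point of the kref conversion is that kernel-side users can now hold an eventfd context independently of the file descriptor: eventfd_ctx_fdget() takes its own reference and drops the file, so a subsequent close() from userspace cannot free the context out from under the kernel. A sketch of a consumer written against the signatures introduced above; the module-state names are invented:

#include <linux/eventfd.h>
#include <linux/err.h>

static struct eventfd_ctx *trigger;	/* hypothetical module state */

int attach_trigger(int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	trigger = ctx;	/* our own reference; the fd may now be closed */
	return 0;
}

void fire_trigger(void)
{
	eventfd_signal(trigger, 1);	/* new ctx-based signature */
}

void detach_trigger(void)
{
	eventfd_ctx_put(trigger);	/* drop the kref we took */
	trigger = NULL;
}
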
diff --git a/fs/exec.c b/fs/exec.c
index e639957..4a8849e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1277,8 +1277,8 @@ int do_execve(char * filename,
if (!bprm)
goto out_files;
- retval = mutex_lock_interruptible(&current->cred_guard_mutex);
- if (retval < 0)
+ retval = -ERESTARTNOINTR;
+ if (mutex_lock_interruptible(&current->cred_guard_mutex))
goto out_free;
current->in_execve = 1;
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index 24667ee..c6718e4 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -2,9 +2,7 @@
* common.h - Common definitions for both Kernel and user-mode utilities
*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 65b0c8c..4cfab1c 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 0fd4c785..5ec72e0 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -156,6 +154,9 @@ ino_t exofs_parent_ino(struct dentry *child);
int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
struct inode *);
+/* super.c */
+int exofs_sync_fs(struct super_block *sb, int wait);
+
/*********************
* operation vectors *
*********************/
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 6ed7fe4..839b9dc 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -47,16 +45,23 @@ static int exofs_file_fsync(struct file *filp, struct dentry *dentry,
{
int ret;
struct address_space *mapping = filp->f_mapping;
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb;
ret = filemap_write_and_wait(mapping);
if (ret)
return ret;
- /*Note: file_fsync below also calles sync_blockdev, which is a no-op
- * for exofs, but other then that it does sync_inode and
- * sync_superblock which is what we need here.
- */
- return file_fsync(filp, dentry, datasync);
+ /* sync the inode attributes */
+ ret = write_inode_now(inode, 1);
+
+ /* This is a good place to write the sb */
+	/* TODO: Schedule an sb-sync on create */
+ sb = inode->i_sb;
+ if (sb->s_dirt)
+ exofs_sync_fs(sb, 1);
+
+ return ret;
}
static int exofs_flush(struct file *file, fl_owner_t id)
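
Dropping file_fsync() makes exofs spell out the three steps a simple fsync needs: flush the data pages, write the inode synchronously, then push the superblock if it is dirty. Condensed to its shape, with error handling abbreviated; this restates the hunk above, it is not separate code:

ret = filemap_write_and_wait(filp->f_mapping);	/* 1: data pages */
if (ret)
	return ret;
ret = write_inode_now(inode, 1);		/* 2: inode, synchronous */
if (inode->i_sb->s_dirt)
	exofs_sync_fs(inode->i_sb, 1);		/* 3: superblock if dirty */
return ret;
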
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 77d0a29..6c10f74 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -295,6 +293,9 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
err:
if (!is_sync)
_unlock_pcol_pages(pcol, ret, READ);
+ else /* Pages unlocked by caller in sync mode only free bio */
+ pcol_free(pcol);
+
kfree(pcol_copy);
if (or)
osd_end_request(or);
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 77fdd76..b7dd0c2 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index b3d2ccb..4372542 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 8216c5b..5ab10c3 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -33,6 +31,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/vfs.h>
@@ -200,7 +199,7 @@ static const struct export_operations exofs_export_ops;
/*
* Write the superblock to the OSD
*/
-static int exofs_sync_fs(struct super_block *sb, int wait)
+int exofs_sync_fs(struct super_block *sb, int wait)
{
struct exofs_sb_info *sbi;
struct exofs_fscb *fscb;
diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c
index 36e2d7b..4dd687c 100644
--- a/fs/exofs/symlink.c
+++ b/fs/exofs/symlink.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 7cb4bad..e743130 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/compat.h>
#include <linux/mount.h>
-#include <linux/smp_lock.h>
#include <asm/current.h>
#include <asm/uaccess.h>
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 6524eca..e1dedb0 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -66,8 +66,16 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
inode = NULL;
if (ino) {
inode = ext2_iget(dir->i_sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
+ if (unlikely(IS_ERR(inode))) {
+ if (PTR_ERR(inode) == -ESTALE) {
+ ext2_error(dir->i_sb, __func__,
+ "deleted inode referenced: %lu",
+ ino);
+ return ERR_PTR(-EIO);
+ } else {
+ return ERR_CAST(inode);
+ }
+ }
}
return d_splice_alias(inode, dentry);
}
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 3d724a9..373fa90 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -130,8 +130,7 @@ static int ext3_readdir(struct file * filp,
struct buffer_head *bh = NULL;
map_bh.b_state = 0;
- err = ext3_get_blocks_handle(NULL, inode, blk, 1,
- &map_bh, 0, 0);
+ err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
if (err > 0) {
pgoff_t index = map_bh.b_blocknr >>
(PAGE_CACHE_SHIFT - inode->i_blkbits);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 5f51fed..b49908a 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -788,7 +788,7 @@ err_out:
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
sector_t iblock, unsigned long maxblocks,
struct buffer_head *bh_result,
- int create, int extend_disksize)
+ int create)
{
int err = -EIO;
int offsets[4];
@@ -911,13 +911,6 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
if (!err)
err = ext3_splice_branch(handle, inode, iblock,
partial, indirect_blks, count);
- /*
- * i_disksize growing is protected by truncate_mutex. Don't forget to
- * protect it if you're about to implement concurrent
- * ext3_get_block() -bzzz
- */
- if (!err && extend_disksize && inode->i_size > ei->i_disksize)
- ei->i_disksize = inode->i_size;
mutex_unlock(&ei->truncate_mutex);
if (err)
goto cleanup;
@@ -972,7 +965,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock,
}
ret = ext3_get_blocks_handle(handle, inode, iblock,
- max_blocks, bh_result, create, 0);
+ max_blocks, bh_result, create);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;
@@ -1005,7 +998,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
dummy.b_blocknr = -1000;
buffer_trace_init(&dummy.b_history);
err = ext3_get_blocks_handle(handle, inode, block, 1,
- &dummy, create, 1);
+ &dummy, create);
/*
* ext3_get_blocks_handle() returns number of blocks
* mapped. 0 in case of a HOLE.
@@ -1193,15 +1186,16 @@ write_begin_failed:
* i_size_read because we hold i_mutex.
*
* Add inode to orphan list in case we crash before truncate
- * finishes.
+ * finishes. Do this only if ext3_can_truncate() agrees so
+ * that orphan processing code is happy.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
ext3_journal_stop(handle);
unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
}
if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
goto retry;
@@ -1287,7 +1281,7 @@ static int ext3_ordered_write_end(struct file *file,
* There may be allocated blocks outside of i_size because
* we failed to copy some data. Prepare for truncate.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
ret2 = ext3_journal_stop(handle);
if (!ret)
@@ -1296,7 +1290,7 @@ static int ext3_ordered_write_end(struct file *file,
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
@@ -1315,14 +1309,14 @@ static int ext3_writeback_write_end(struct file *file,
* There may be allocated blocks outside of i_size because
* we failed to copy some data. Prepare for truncate.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
ret = ext3_journal_stop(handle);
unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
@@ -1358,7 +1352,7 @@ static int ext3_journalled_write_end(struct file *file,
* There may be allocated blocks outside of i_size because
* we failed to copy some data. Prepare for truncate.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
if (inode->i_size > EXT3_I(inode)->i_disksize) {
@@ -1375,7 +1369,7 @@ static int ext3_journalled_write_end(struct file *file,
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0ddf7e5..9714db3 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -93,20 +93,20 @@ typedef unsigned int ext4_group_t;
struct ext4_allocation_request {
/* target inode for block we're allocating */
struct inode *inode;
+ /* how many blocks we want to allocate */
+ unsigned int len;
/* logical block in target inode */
ext4_lblk_t logical;
- /* phys. target (a hint) */
- ext4_fsblk_t goal;
/* the closest logical allocated block to the left */
ext4_lblk_t lleft;
- /* phys. block for ^^^ */
- ext4_fsblk_t pleft;
/* the closest logical allocated block to the right */
ext4_lblk_t lright;
- /* phys. block for ^^^ */
+ /* phys. target (a hint) */
+ ext4_fsblk_t goal;
+ /* phys. block for the closest logical allocated block to the left */
+ ext4_fsblk_t pleft;
+ /* phys. block for the closest logical allocated block to the right */
ext4_fsblk_t pright;
- /* how many blocks we want to allocate */
- unsigned int len;
/* flags. see above EXT4_MB_HINT_* */
unsigned int flags;
};
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index ad13a84..eb27fd0 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -43,6 +43,8 @@ int __ext4_journal_forget(const char *where, handle_t *handle,
ext4_journal_abort_handle(where, __func__, bh,
handle, err);
}
+ else
+ brelse(bh);
return err;
}
@@ -57,6 +59,8 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
ext4_journal_abort_handle(where, __func__, bh,
handle, err);
}
+ else
+ brelse(bh);
return err;
}
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index be2f426..139fb8c 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -131,9 +131,11 @@ int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
int __ext4_journal_get_write_access(const char *where, handle_t *handle,
struct buffer_head *bh);
+/* When called with an invalid handle, this will still do a put on the BH */
int __ext4_journal_forget(const char *where, handle_t *handle,
struct buffer_head *bh);
+/* When called with an invalid handle, this will still do a put on the BH */
int __ext4_journal_revoke(const char *where, handle_t *handle,
ext4_fsblk_t blocknr, struct buffer_head *bh);
@@ -281,10 +283,10 @@ static inline int ext4_should_order_data(struct inode *inode)
static inline int ext4_should_writeback_data(struct inode *inode)
{
- if (EXT4_JOURNAL(inode) == NULL)
- return 0;
if (!S_ISREG(inode->i_mode))
return 0;
+ if (EXT4_JOURNAL(inode) == NULL)
+ return 1;
if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
return 0;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 50322a0..73ebfb4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1977,6 +1977,7 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
*/
/* 1 bitmap, 1 block group descriptor */
ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
+ return ret;
}
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2f64573..29e6dc7 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -833,7 +833,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
if (!goal)
goal = sbi->s_inode_goal;
- if (goal && goal < le32_to_cpu(sbi->s_es->s_inodes_count)) {
+ if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
ret2 = 0;
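
The ialloc change is a one-character off-by-one fix: inode numbers are 1-based, so the largest valid goal is exactly s_inodes_count, which the old strict less-than test rejected. Worked through with small invented numbers:

/* Runnable check of the boundary case, with made-up small numbers. */
#include <stdio.h>

int main(void)
{
	unsigned inodes_per_group = 8192;	/* illustrative */
	unsigned inodes_count = 16384;		/* two groups */
	unsigned goal = 16384;			/* the last valid inode number */

	/* the old test `goal < inodes_count` wrongly rejected this goal */
	if (goal && goal <= inodes_count)
		printf("group=%u ino=%u\n",
		       (goal - 1) / inodes_per_group,	/* -> 1 */
		       (goal - 1) % inodes_per_group);	/* -> 8191 */
	return 0;
}
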
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 60a26f3..f9c642b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -78,16 +78,14 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
* but there may still be a record of it in the journal, and that record
* still needs to be revoked.
*
- * If the handle isn't valid we're not journaling so there's nothing to do.
+ * If the handle isn't valid we're not journaling, but we still need to
+ * call into ext4_journal_revoke() to put the buffer head.
*/
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t blocknr)
{
int err;
- if (!ext4_handle_valid(handle))
- return 0;
-
might_sleep();
BUFFER_TRACE(bh, "enter");
@@ -1513,14 +1511,14 @@ retry:
* Add inode to orphan list in case we crash before
* truncate finishes
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
if (pos + len > inode->i_size) {
- vmtruncate(inode, inode->i_size);
+ ext4_truncate(inode);
/*
- * If vmtruncate failed early the inode might
+ * If truncate failed early the inode might
* still be on the orphan list; we need to
* make sure the inode is removed from the
* orphan list in that case.
@@ -1614,7 +1612,7 @@ static int ext4_ordered_write_end(struct file *file,
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
* inode->i_size. So truncate them
@@ -1628,9 +1626,9 @@ static int ext4_ordered_write_end(struct file *file,
ret = ret2;
if (pos + len > inode->i_size) {
- vmtruncate(inode, inode->i_size);
+ ext4_truncate(inode);
/*
- * If vmtruncate failed early the inode might still be
+ * If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
@@ -1655,7 +1653,7 @@ static int ext4_writeback_write_end(struct file *file,
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
* inode->i_size. So truncate them
@@ -1670,9 +1668,9 @@ static int ext4_writeback_write_end(struct file *file,
ret = ret2;
if (pos + len > inode->i_size) {
- vmtruncate(inode, inode->i_size);
+ ext4_truncate(inode);
/*
- * If vmtruncate failed early the inode might still be
+ * If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
@@ -1722,7 +1720,7 @@ static int ext4_journalled_write_end(struct file *file,
unlock_page(page);
page_cache_release(page);
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
* inode->i_size. So truncate them
@@ -1733,9 +1731,9 @@ static int ext4_journalled_write_end(struct file *file,
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
- vmtruncate(inode, inode->i_size);
+ ext4_truncate(inode);
/*
- * If vmtruncate failed early the inode might still be
+ * If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
@@ -2305,15 +2303,9 @@ flush_it:
return;
}
-static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
+static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
- /*
- * unmapped buffer is possible for holes.
- * delay buffer is possible with delayed allocation.
- * We also need to consider unwritten buffer as unmapped.
- */
- return (!buffer_mapped(bh) || buffer_delay(bh) ||
- buffer_unwritten(bh)) && buffer_dirty(bh);
+ return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
/*
@@ -2398,9 +2390,9 @@ static int __mpage_da_writepage(struct page *page,
* We need to try to allocate
* unmapped blocks in the same page.
* Otherwise we won't make progress
- * with the page in ext4_da_writepage
+ * with the page in ext4_writepage
*/
- if (ext4_bh_unmapped_or_delay(NULL, bh)) {
+ if (ext4_bh_delay_or_unwritten(NULL, bh)) {
mpage_add_bh_to_extent(mpd, logical,
bh->b_size,
bh->b_state);
@@ -2517,7 +2509,6 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
* so call get_block_wrap with create = 0
*/
ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
- BUG_ON(create && ret == 0);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;
@@ -2525,15 +2516,102 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
return ret;
}
+static int bget_one(handle_t *handle, struct buffer_head *bh)
+{
+ get_bh(bh);
+ return 0;
+}
+
+static int bput_one(handle_t *handle, struct buffer_head *bh)
+{
+ put_bh(bh);
+ return 0;
+}
+
+static int __ext4_journalled_writepage(struct page *page,
+ struct writeback_control *wbc,
+ unsigned int len)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+ struct buffer_head *page_bufs;
+ handle_t *handle = NULL;
+ int ret = 0;
+ int err;
+
+ page_bufs = page_buffers(page);
+ BUG_ON(!page_bufs);
+ walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
+ /* As soon as we unlock the page, it can go away, but we have
+ * references to buffers so we are safe */
+ unlock_page(page);
+
+ handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+
+ ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+ do_journal_get_write_access);
+
+ err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+ write_end_fn);
+ if (ret == 0)
+ ret = err;
+ err = ext4_journal_stop(handle);
+ if (!ret)
+ ret = err;
+
+ walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
+ EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+out:
+ return ret;
+}
+
/*
+ * Note that we don't need to start a transaction unless we're journaling data
+ * because we should have holes filled from ext4_page_mkwrite(). We don't even
+ * need to add the inode to the transaction's list in ordered mode, because if
+ * we are writing back data added by write(), the inode is already there, and if
+ * we are writing back data modified via mmap(), no one guarantees in which
+ * transaction the data will hit the disk. In case we are journaling data, we
+ * cannot start transaction directly because transaction start ranks above page
+ * lock so we have to do some magic.
+ *
* This function can get called via...
* - ext4_da_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
* - shrink_page_list via pdflush (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
+ *
+ * We don't do any block allocation in this function. If we have a page with
+ * multiple blocks we need to write those buffer_heads that are mapped. This
+ * is important for mmap-based writes. For example, with a 1K blocksize:
+ *	truncate(f, 1024);
+ *	a = mmap(f, 0, 4096);
+ *	a[0] = 'a';
+ *	truncate(f, 4096);
+ * this leaves the first buffer_head in the page mapped via the page_mkwrite
+ * callback, while the other buffer_heads are unmapped but dirty (dirtied via
+ * do_wp_page). So writepage should write out the first block. If we modify
+ * the mmap area beyond 1024 we will get another page fault, and the
+ * page_mkwrite callback will do the block allocation and mark the
+ * buffer_heads mapped.
+ *
+ * We redirty the page if it has any buffer_heads that are either delayed
+ * or unwritten.
+ *
+ * We can get recursively called as shown below.
+ *
+ * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
+ * ext4_writepage()
+ *
+ * But since we don't do any block allocation we should not deadlock.
+ * The page also has its dirty flag cleared, so we don't take the page lock
+ * recursively.
*/
-static int ext4_da_writepage(struct page *page,
- struct writeback_control *wbc)
+static int ext4_writepage(struct page *page,
+ struct writeback_control *wbc)
{
int ret = 0;
loff_t size;
@@ -2541,7 +2619,7 @@ static int ext4_da_writepage(struct page *page,
struct buffer_head *page_bufs;
struct inode *inode = page->mapping->host;
- trace_ext4_da_writepage(inode, page);
+ trace_ext4_writepage(inode, page);
size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
@@ -2551,7 +2629,7 @@ static int ext4_da_writepage(struct page *page,
if (page_has_buffers(page)) {
page_bufs = page_buffers(page);
if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
- ext4_bh_unmapped_or_delay)) {
+ ext4_bh_delay_or_unwritten)) {
/*
* We don't want to do block allocation
* So redirty the page and return
@@ -2578,13 +2656,13 @@ static int ext4_da_writepage(struct page *page,
* all are mapped and non delay. We don't want to
* do block allocation here.
*/
- ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
+ ret = block_prepare_write(page, 0, len,
noalloc_get_block_write);
if (!ret) {
page_bufs = page_buffers(page);
/* check whether all are mapped and non delay */
if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
- ext4_bh_unmapped_or_delay)) {
+ ext4_bh_delay_or_unwritten)) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
@@ -2600,7 +2678,16 @@ static int ext4_da_writepage(struct page *page,
return 0;
}
/* now mark the buffer_heads as dirty and uptodate */
- block_commit_write(page, 0, PAGE_CACHE_SIZE);
+ block_commit_write(page, 0, len);
+ }
+
+ if (PageChecked(page) && ext4_should_journal_data(inode)) {
+ /*
+ * It's mmapped pagecache. Add buffers and journal it. There
+ * doesn't seem much point in redirtying the page here.
+ */
+ ClearPageChecked(page);
+ return __ext4_journalled_writepage(page, wbc, len);
}
if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -2907,7 +2994,7 @@ retry:
* i_size_read because we hold i_mutex.
*/
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext4_truncate(inode);
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3130,222 +3217,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, ext4_get_block);
}
-static int bget_one(handle_t *handle, struct buffer_head *bh)
-{
- get_bh(bh);
- return 0;
-}
-
-static int bput_one(handle_t *handle, struct buffer_head *bh)
-{
- put_bh(bh);
- return 0;
-}
-
-/*
- * Note that we don't need to start a transaction unless we're journaling data
- * because we should have holes filled from ext4_page_mkwrite(). We even don't
- * need to file the inode to the transaction's list in ordered mode because if
- * we are writing back data added by write(), the inode is already there and if
- * we are writing back data modified via mmap(), noone guarantees in which
- * transaction the data will hit the disk. In case we are journaling data, we
- * cannot start transaction directly because transaction start ranks above page
- * lock so we have to do some magic.
- *
- * In all journaling modes block_write_full_page() will start the I/O.
- *
- * Problem:
- *
- * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
- * ext4_writepage()
- *
- * Similar for:
- *
- * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
- *
- * Same applies to ext4_get_block(). We will deadlock on various things like
- * lock_journal and i_data_sem
- *
- * Setting PF_MEMALLOC here doesn't work - too many internal memory
- * allocations fail.
- *
- * 16May01: If we're reentered then journal_current_handle() will be
- * non-zero. We simply *return*.
- *
- * 1 July 2001: @@@ FIXME:
- * In journalled data mode, a data buffer may be metadata against the
- * current transaction. But the same file is part of a shared mapping
- * and someone does a writepage() on it.
- *
- * We will move the buffer onto the async_data list, but *after* it has
- * been dirtied. So there's a small window where we have dirty data on
- * BJ_Metadata.
- *
- * Note that this only applies to the last partial page in the file. The
- * bit which block_write_full_page() uses prepare/commit for. (That's
- * broken code anyway: it's wrong for msync()).
- *
- * It's a rare case: affects the final partial page, for journalled data
- * where the file is subject to bith write() and writepage() in the same
- * transction. To fix it we'll need a custom block_write_full_page().
- * We'll probably need that anyway for journalling writepage() output.
- *
- * We don't honour synchronous mounts for writepage(). That would be
- * disastrous. Any write() or metadata operation will sync the fs for
- * us.
- *
- */
-static int __ext4_normal_writepage(struct page *page,
- struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
-
- if (test_opt(inode->i_sb, NOBH))
- return nobh_writepage(page, noalloc_get_block_write, wbc);
- else
- return block_write_full_page(page, noalloc_get_block_write,
- wbc);
-}
-
-static int ext4_normal_writepage(struct page *page,
- struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
- loff_t size = i_size_read(inode);
- loff_t len;
-
- trace_ext4_normal_writepage(inode, page);
- J_ASSERT(PageLocked(page));
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
- else
- len = PAGE_CACHE_SIZE;
-
- if (page_has_buffers(page)) {
- /* if page has buffers it should all be mapped
- * and allocated. If there are not buffers attached
- * to the page we know the page is dirty but it lost
- * buffers. That means that at some moment in time
- * after write_begin() / write_end() has been called
- * all buffers have been clean and thus they must have been
- * written at least once. So they are all mapped and we can
- * happily proceed with mapping them and writing the page.
- */
- BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
- ext4_bh_unmapped_or_delay));
- }
-
- if (!ext4_journal_current_handle())
- return __ext4_normal_writepage(page, wbc);
-
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-}
-
-static int __ext4_journalled_writepage(struct page *page,
- struct writeback_control *wbc)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct buffer_head *page_bufs;
- handle_t *handle = NULL;
- int ret = 0;
- int err;
-
- ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
- noalloc_get_block_write);
- if (ret != 0)
- goto out_unlock;
-
- page_bufs = page_buffers(page);
- walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
- bget_one);
- /* As soon as we unlock the page, it can go away, but we have
- * references to buffers so we are safe */
- unlock_page(page);
-
- handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
-
- ret = walk_page_buffers(handle, page_bufs, 0,
- PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
-
- err = walk_page_buffers(handle, page_bufs, 0,
- PAGE_CACHE_SIZE, NULL, write_end_fn);
- if (ret == 0)
- ret = err;
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
-
- walk_page_buffers(handle, page_bufs, 0,
- PAGE_CACHE_SIZE, NULL, bput_one);
- EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
- goto out;
-
-out_unlock:
- unlock_page(page);
-out:
- return ret;
-}
-
-static int ext4_journalled_writepage(struct page *page,
- struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
- loff_t size = i_size_read(inode);
- loff_t len;
-
- trace_ext4_journalled_writepage(inode, page);
- J_ASSERT(PageLocked(page));
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
- else
- len = PAGE_CACHE_SIZE;
-
- if (page_has_buffers(page)) {
- /* if page has buffers it should all be mapped
- * and allocated. If there are not buffers attached
- * to the page we know the page is dirty but it lost
- * buffers. That means that at some moment in time
- * after write_begin() / write_end() has been called
- * all buffers have been clean and thus they must have been
- * written at least once. So they are all mapped and we can
- * happily proceed with mapping them and writing the page.
- */
- BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
- ext4_bh_unmapped_or_delay));
- }
-
- if (ext4_journal_current_handle())
- goto no_write;
-
- if (PageChecked(page)) {
- /*
- * It's mmapped pagecache. Add buffers and journal it. There
- * doesn't seem much point in redirtying the page here.
- */
- ClearPageChecked(page);
- return __ext4_journalled_writepage(page, wbc);
- } else {
- /*
- * It may be a page full of checkpoint-mode buffers. We don't
- * really know unless we go poke around in the buffer_heads.
- * But block_write_full_page will do the right thing.
- */
- return block_write_full_page(page, noalloc_get_block_write,
- wbc);
- }
-no_write:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-}
-
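A pattern worth noting in the removed journalled-writepage path above: several independent steps each return an error, and the function reports the first failure while still running every later cleanup step ("if (ret == 0) ret = err;"). A minimal userspace sketch of that first-error-wins aggregation — the step functions here are illustrative stand-ins, not kernel APIs:

#include <stdio.h>

/* Hypothetical steps; each returns 0 on success, negative errno on failure. */
static int step_journal_access(void) { return 0; }
static int step_write_end(void)      { return -5;  /* say, -EIO */ }
static int step_journal_stop(void)   { return -22; /* say, -EINVAL */ }

int main(void)
{
	int ret, err;

	ret = step_journal_access();

	/* Run the remaining steps unconditionally, but keep the first
	 * error seen: later failures must not overwrite it. */
	err = step_write_end();
	if (ret == 0)
		ret = err;

	err = step_journal_stop();
	if (ret == 0)
		ret = err;

	printf("first error: %d\n", ret);	/* prints -5 */
	return 0;
}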
static int ext4_readpage(struct file *file, struct page *page)
{
return mpage_readpage(page, ext4_get_block);
@@ -3492,7 +3363,7 @@ static int ext4_journalled_set_page_dirty(struct page *page)
static const struct address_space_operations ext4_ordered_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
- .writepage = ext4_normal_writepage,
+ .writepage = ext4_writepage,
.sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_ordered_write_end,
@@ -3507,7 +3378,7 @@ static const struct address_space_operations ext4_ordered_aops = {
static const struct address_space_operations ext4_writeback_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
- .writepage = ext4_normal_writepage,
+ .writepage = ext4_writepage,
.sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_writeback_write_end,
@@ -3522,7 +3393,7 @@ static const struct address_space_operations ext4_writeback_aops = {
static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
- .writepage = ext4_journalled_writepage,
+ .writepage = ext4_writepage,
.sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
@@ -3536,7 +3407,7 @@ static const struct address_space_operations ext4_journalled_aops = {
static const struct address_space_operations ext4_da_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
- .writepage = ext4_da_writepage,
+ .writepage = ext4_writepage,
.writepages = ext4_da_writepages,
.sync_page = block_sync_page,
.write_begin = ext4_da_write_begin,
@@ -3583,7 +3454,8 @@ int ext4_block_truncate_page(handle_t *handle,
struct page *page;
int err = 0;
- page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
+ page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+ mapping_gfp_mask(mapping) & ~__GFP_FS);
if (!page)
return -EINVAL;
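The ext4_block_truncate_page change above allocates the page with __GFP_FS masked out of the mapping's gfp mask, so the allocator cannot re-enter the filesystem while a transaction is held. A hedged sketch of the same clear-a-flag-before-use idiom, with invented flag values rather than the real GFP constants:

#include <stdio.h>

/* Illustrative flag bits; the real GFP flags live in the kernel headers. */
#define MY_GFP_IO	0x01u	/* may start I/O */
#define MY_GFP_FS	0x02u	/* may call back into the filesystem */
#define MY_GFP_WAIT	0x04u	/* may sleep */

static unsigned int mapping_default_mask(void)
{
	return MY_GFP_IO | MY_GFP_FS | MY_GFP_WAIT;
}

int main(void)
{
	/* Start from the mapping's default mask, then forbid fs re-entry. */
	unsigned int gfp = mapping_default_mask() & ~MY_GFP_FS;

	printf("mask: %#x (fs allowed: %s)\n",
	       gfp, (gfp & MY_GFP_FS) ? "yes" : "no");
	return 0;
}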
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bb415408..7050a9c 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -12,7 +12,6 @@
#include <linux/capability.h>
#include <linux/time.h>
#include <linux/compat.h>
-#include <linux/smp_lock.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
@@ -192,7 +191,7 @@ setversion_out:
case EXT4_IOC_GROUP_EXTEND: {
ext4_fsblk_t n_blocks_count;
struct super_block *sb = inode->i_sb;
- int err, err2;
+		int err, err2 = 0;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
@@ -205,9 +204,11 @@ setversion_out:
return err;
err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
- jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
- err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
- jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+ if (EXT4_SB(sb)->s_journal) {
+ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+ err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+ }
if (err == 0)
err = err2;
mnt_drop_write(filp->f_path.mnt);
@@ -252,7 +253,7 @@ setversion_out:
case EXT4_IOC_GROUP_ADD: {
struct ext4_new_group_data input;
struct super_block *sb = inode->i_sb;
- int err, err2;
+		int err, err2 = 0;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
@@ -266,9 +267,11 @@ setversion_out:
return err;
err = ext4_group_add(sb, &input);
- jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
- err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
- jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+ if (EXT4_SB(sb)->s_journal) {
+ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+ err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+ }
if (err == 0)
err = err2;
mnt_drop_write(filp->f_path.mnt);
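Both ioctl hunks make the journal flush conditional, which is why err2 now needs an initializer: with no journal present, the old code would have folded an uninitialized err2 into the return value. A small sketch of the hazard and the fix, with illustrative names:

#include <stdio.h>

struct journal { int dummy; };

static int flush_journal(struct journal *j)
{
	(void)j;
	return 0;
}

static int do_resize(struct journal *maybe_journal)
{
	int err = 0;
	int err2 = 0;	/* must be initialized: the step below is conditional */

	/* ... the resize work itself would set err ... */

	if (maybe_journal)	/* journal may be absent (no-journal mode) */
		err2 = flush_journal(maybe_journal);

	if (err == 0)
		err = err2;	/* without the initializer this reads garbage */
	return err;
}

int main(void)
{
	printf("no journal: %d\n", do_resize(NULL));
	return 0;
}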
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 519a0a6..cd25846 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -657,7 +657,8 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
}
}
-static void ext4_mb_generate_buddy(struct super_block *sb,
+static noinline_for_stack
+void ext4_mb_generate_buddy(struct super_block *sb,
void *buddy, void *bitmap, ext4_group_t group)
{
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
@@ -1480,7 +1481,8 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
ext4_mb_check_limits(ac, e4b, 0);
}
-static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+static noinline_for_stack
+int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
struct ext4_buddy *e4b)
{
struct ext4_free_extent ex = ac->ac_b_ex;
@@ -1507,7 +1509,8 @@ static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
return 0;
}
-static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+static noinline_for_stack
+int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
struct ext4_buddy *e4b)
{
ext4_group_t group = ac->ac_g_ex.fe_group;
@@ -1566,7 +1569,8 @@ static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 * The routine scans buddy structures (not the bitmap!) from the given order
 * up to the max order and tries to find a big enough chunk to satisfy the request
*/
-static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
struct ext4_buddy *e4b)
{
struct super_block *sb = ac->ac_sb;
@@ -1609,7 +1613,8 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
 * In order to optimize scanning, the caller must pass the number of
 * free blocks in the group, so the routine knows the upper limit.
*/
-static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
struct ext4_buddy *e4b)
{
struct super_block *sb = ac->ac_sb;
@@ -1668,7 +1673,8 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
* we try to find stripe-aligned chunks for stripe-size requests
* XXX should do so at least for multiples of stripe size as well
*/
-static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
struct ext4_buddy *e4b)
{
struct super_block *sb = ac->ac_sb;
@@ -1831,7 +1837,8 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
}
-static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+static noinline_for_stack
+int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{
int ret;
@@ -2902,7 +2909,11 @@ int __init init_ext4_mballoc(void)
void exit_ext4_mballoc(void)
{
- /* XXX: synchronize_rcu(); */
+ /*
+ * Wait for completion of call_rcu()'s on ext4_pspace_cachep
+ * before destroying the slab cache.
+ */
+ rcu_barrier();
kmem_cache_destroy(ext4_pspace_cachep);
kmem_cache_destroy(ext4_ac_cachep);
kmem_cache_destroy(ext4_free_ext_cachep);
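The rcu_barrier() added above waits for every pending call_rcu() callback to finish before the slab caches are destroyed; synchronize_rcu() only waits out readers, not queued callbacks, so the old XXX comment marked a real gap. A pthread-based analogy of the same rule — drain deferred work before tearing down the pool it frees into — under the assumption of a trivial single-worker queue:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *queued[16];
static int nqueued;

static void *drain_worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < nqueued; i++)
		free(queued[i]);	/* deferred frees, like call_rcu() callbacks */
	nqueued = 0;
	return NULL;
}

int main(void)
{
	pthread_t worker;

	for (int i = 0; i < 4; i++)
		queued[nqueued++] = malloc(64);

	pthread_create(&worker, NULL, drain_worker, NULL);

	/* The join plays the role of rcu_barrier(): only once every
	 * deferred free has run is it safe to destroy the pool the
	 * objects came from. */
	pthread_join(worker, NULL);

	printf("pending: %d, safe to destroy cache\n", nqueued);
	return 0;
}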
@@ -3457,7 +3468,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 * used in the in-core bitmap. The buddy must be generated from this bitmap.
 * Needs to be called with the ext4 group lock held.
*/
-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+static noinline_for_stack
+void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group)
{
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
@@ -4215,14 +4227,9 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ext4_get_group_no_and_offset(sb, goal, &group, &block);
/* set up allocation goals */
+ memset(ac, 0, sizeof(struct ext4_allocation_context));
ac->ac_b_ex.fe_logical = ar->logical;
- ac->ac_b_ex.fe_group = 0;
- ac->ac_b_ex.fe_start = 0;
- ac->ac_b_ex.fe_len = 0;
ac->ac_status = AC_STATUS_CONTINUE;
- ac->ac_groups_scanned = 0;
- ac->ac_ex_scanned = 0;
- ac->ac_found = 0;
ac->ac_sb = sb;
ac->ac_inode = ar->inode;
ac->ac_o_ex.fe_logical = ar->logical;
@@ -4233,15 +4240,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ac->ac_g_ex.fe_group = group;
ac->ac_g_ex.fe_start = block;
ac->ac_g_ex.fe_len = len;
- ac->ac_f_ex.fe_len = 0;
ac->ac_flags = ar->flags;
- ac->ac_2order = 0;
- ac->ac_criteria = 0;
- ac->ac_pa = NULL;
- ac->ac_bitmap_page = NULL;
- ac->ac_buddy_page = NULL;
- ac->alloc_semp = NULL;
- ac->ac_lg = NULL;
	/* we have to define the context: whether we'll work with a file or
	 * a locality group. this is a policy decision, actually */
@@ -4509,10 +4508,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
}
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
- if (ac) {
- ac->ac_sb = sb;
- ac->ac_inode = ar->inode;
- } else {
+ if (!ac) {
ar->len = 0;
*errp = -ENOMEM;
goto out1;
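ext4_mb_initialize_context above replaces a long run of field-by-field zeroing with a single memset followed by assignments of only the nonzero fields, so newly added members default to zero instead of being forgotten. A sketch of the idiom with an invented context struct:

#include <stdio.h>
#include <string.h>

struct alloc_ctx {
	int status;
	int flags;
	int found;
	int criteria;
	void *bitmap_page;	/* new fields start life as NULL/0 for free */
};

static void ctx_init(struct alloc_ctx *ac, int flags)
{
	/* Zero everything once, then set only the nonzero fields. */
	memset(ac, 0, sizeof(*ac));
	ac->status = 1;
	ac->flags = flags;
}

int main(void)
{
	struct alloc_ctx ac;

	ctx_init(&ac, 0x10);
	printf("status=%d flags=%#x found=%d\n", ac.status, ac.flags, ac.found);
	return 0;
}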
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 38ff75a..530b4ca 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
diff --git a/fs/fat/file.c b/fs/fat/file.c
index b28ea64..f042b96 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -134,7 +134,7 @@ static int fat_file_release(struct inode *inode, struct file *filp)
if ((filp->f_mode & FMODE_WRITE) &&
MSDOS_SB(inode->i_sb)->options.flush) {
fat_flush_inodes(inode->i_sb, inode, NULL);
- congestion_wait(WRITE, HZ/10);
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
}
return 0;
}
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 82f8873..bbc94ae 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/time.h>
#include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
#include "fat.h"
/* Characters that are undesirable in an MS-DOS file name */
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 73471b7..cb6e835 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include <linux/ctype.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/namei.h>
#include "fat.h"
diff --git a/fs/fcntl.c b/fs/fcntl.c
index a040b76..ae41308 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -19,7 +19,6 @@
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
-#include <linux/smp_lock.h>
#include <asm/poll.h>
#include <asm/siginfo.h>
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index cdbd165..1e8af93 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -38,6 +38,7 @@
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/stat.h>
#include <linux/vfs.h>
#include <linux/mount.h>
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8fed2ed..6484eb7 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -286,8 +286,8 @@ __releases(&fc->lock)
}
if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
fc->connected && fc->bdi_initialized) {
- clear_bdi_congested(&fc->bdi, READ);
- clear_bdi_congested(&fc->bdi, WRITE);
+ clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
+ clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
}
fc->num_background--;
fc->active_background--;
@@ -414,8 +414,8 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
fc->blocked = 1;
if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
fc->bdi_initialized) {
- set_bdi_congested(&fc->bdi, READ);
- set_bdi_congested(&fc->bdi, WRITE);
+ set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
+ set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
}
list_add_tail(&req->list, &fc->bg_queue);
flush_bg_queue(fc);
@@ -849,6 +849,81 @@ err:
return err;
}
+static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_inval_inode_out outarg;
+ int err = -EINVAL;
+
+ if (size != sizeof(outarg))
+ goto err;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto err;
+ fuse_copy_finish(cs);
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+ if (!fc->sb)
+ goto err_unlock;
+
+ err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
+ outarg.off, outarg.len);
+
+err_unlock:
+ up_read(&fc->killsb);
+ return err;
+
+err:
+ fuse_copy_finish(cs);
+ return err;
+}
+
+static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_inval_entry_out outarg;
+ int err = -EINVAL;
+ char buf[FUSE_NAME_MAX+1];
+ struct qstr name;
+
+ if (size < sizeof(outarg))
+ goto err;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto err;
+
+ err = -ENAMETOOLONG;
+ if (outarg.namelen > FUSE_NAME_MAX)
+ goto err;
+
+ name.name = buf;
+ name.len = outarg.namelen;
+ err = fuse_copy_one(cs, buf, outarg.namelen + 1);
+ if (err)
+ goto err;
+ fuse_copy_finish(cs);
+ buf[outarg.namelen] = 0;
+ name.hash = full_name_hash(name.name, name.len);
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+ if (!fc->sb)
+ goto err_unlock;
+
+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+
+err_unlock:
+ up_read(&fc->killsb);
+ return err;
+
+err:
+ fuse_copy_finish(cs);
+ return err;
+}
+
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
@@ -856,6 +931,12 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
case FUSE_NOTIFY_POLL:
return fuse_notify_poll(fc, size, cs);
+ case FUSE_NOTIFY_INVAL_INODE:
+ return fuse_notify_inval_inode(fc, size, cs);
+
+ case FUSE_NOTIFY_INVAL_ENTRY:
+ return fuse_notify_inval_entry(fc, size, cs);
+
default:
fuse_copy_finish(cs);
return -EINVAL;
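Both new notify handlers follow the same shape: check the payload size against the fixed header, copy it out, bounds-check any variable-length part, and only then act on it. A stripped-down userspace dispatcher showing that validate-before-parse order — the message layout and limits here are invented for the example:

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 32		/* illustrative, in the spirit of FUSE_NAME_MAX */

struct inval_entry_msg {
	unsigned parent;
	unsigned namelen;
	/* namelen bytes of name follow */
};

static int handle_inval_entry(const unsigned char *buf, size_t size)
{
	struct inval_entry_msg msg;

	if (size < sizeof(msg))		/* fixed part must be complete */
		return -1;		/* -EINVAL */
	memcpy(&msg, buf, sizeof(msg));

	if (msg.namelen > NAME_MAX_LEN)	/* bound the variable part */
		return -2;		/* -ENAMETOOLONG */
	if (size < sizeof(msg) + msg.namelen)
		return -1;		/* truncated name */

	printf("invalidate entry under %u, namelen %u\n",
	       msg.parent, msg.namelen);
	return 0;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct inval_entry_msg msg = { .parent = 1, .namelen = 3 };

	memcpy(buf, &msg, sizeof(msg));
	memcpy(buf + sizeof(msg), "foo", 3);
	return handle_inval_entry(buf, sizeof(msg) + 3) ? 1 : 0;
}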
@@ -910,7 +991,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
int err;
- unsigned nbytes = iov_length(iov, nr_segs);
+ size_t nbytes = iov_length(iov, nr_segs);
struct fuse_req *req;
struct fuse_out_header oh;
struct fuse_copy_state cs;
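The fuse_dev_write fix widens nbytes from unsigned to size_t: on 64-bit platforms an iovec total can exceed 4 GiB, and storing it in a 32-bit unsigned silently truncates. A quick demonstration of the truncation, pure arithmetic with no fuse involved:

#include <stdio.h>

int main(void)
{
	unsigned long long total = 5ULL << 30;	/* 5 GiB of iovec payload */
	unsigned truncated = (unsigned)total;	/* the old 32-bit nbytes */

	printf("full length : %llu bytes\n", total);
	printf("as unsigned : %u bytes (silently truncated)\n", truncated);
	return 0;
}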
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b3089a0..e703654 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -375,7 +375,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
struct fuse_conn *fc = get_fuse_conn(dir);
struct fuse_req *req;
struct fuse_req *forget_req;
- struct fuse_open_in inarg;
+ struct fuse_create_in inarg;
struct fuse_open_out outopen;
struct fuse_entry_out outentry;
struct fuse_file *ff;
@@ -399,15 +399,20 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
if (!ff)
goto out_put_request;
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
flags &= ~O_NOCTTY;
memset(&inarg, 0, sizeof(inarg));
memset(&outentry, 0, sizeof(outentry));
inarg.flags = flags;
inarg.mode = mode;
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_CREATE;
req->in.h.nodeid = get_node_id(dir);
req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
+ sizeof(inarg);
req->in.args[0].value = &inarg;
req->in.args[1].size = entry->d_name.len + 1;
req->in.args[1].value = entry->d_name.name;
@@ -546,12 +551,17 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
if (IS_ERR(req))
return PTR_ERR(req);
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
inarg.rdev = new_encode_dev(rdev);
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_MKNOD;
req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
+ sizeof(inarg);
req->in.args[0].value = &inarg;
req->in.args[1].size = entry->d_name.len + 1;
req->in.args[1].value = entry->d_name.name;
@@ -578,8 +588,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
if (IS_ERR(req))
return PTR_ERR(req);
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_MKDIR;
req->in.numargs = 2;
req->in.args[0].size = sizeof(inarg);
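The create/mknod/mkdir hunks apply the process umask in the kernel only when the server has not asked to handle it itself (FUSE_DONT_MASK), and always forward the raw umask in the request. A userspace sketch of reading and applying a umask — note that umask(2) has no pure getter, hence the set-and-restore dance:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t requested = 0666;

	/* umask() can only be read by setting it: set a dummy value,
	 * capture the old one, then restore it. */
	mode_t um = umask(0);
	umask(um);

	mode_t effective = requested & ~um;	/* what mkdir/mknod would get */
	printf("umask %03o: %04o -> %04o\n",
	       (unsigned)um, (unsigned)requested, (unsigned)effective);
	return 0;
}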
@@ -845,6 +859,43 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
return err;
}
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+ struct qstr *name)
+{
+ int err = -ENOTDIR;
+ struct inode *parent;
+ struct dentry *dir;
+ struct dentry *entry;
+
+ parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
+ if (!parent)
+ return -ENOENT;
+
+ mutex_lock(&parent->i_mutex);
+ if (!S_ISDIR(parent->i_mode))
+ goto unlock;
+
+ err = -ENOENT;
+ dir = d_find_alias(parent);
+ if (!dir)
+ goto unlock;
+
+ entry = d_lookup(dir, name);
+ dput(dir);
+ if (!entry)
+ goto unlock;
+
+ fuse_invalidate_attr(parent);
+ fuse_invalidate_entry(entry);
+ dput(entry);
+ err = 0;
+
+ unlock:
+ mutex_unlock(&parent->i_mutex);
+ iput(parent);
+ return err;
+}
+
/*
* Calling into a user-controlled filesystem gives the filesystem
* daemon ptrace-like capabilities over the requester process. This
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index fce6ce6..cbc4640 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1922,7 +1922,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
req = fuse_get_req(fc);
if (IS_ERR(req))
- return PTR_ERR(req);
+ return POLLERR;
req->in.h.opcode = FUSE_POLL;
req->in.h.nodeid = ff->nodeid;
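fuse_file_poll now returns POLLERR instead of a negative errno: poll methods return an event bitmask, so a negative return would be misread as a set of events. A small poll(2) example where POLLERR arrives as an event, produced by polling the write end of a pipe whose read end has been closed:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	struct pollfd pfd;

	if (pipe(fds) < 0)
		return 1;
	close(fds[0]);			/* no reader left */

	pfd.fd = fds[1];		/* poll the write end */
	pfd.events = POLLOUT;

	if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLERR))
		printf("POLLERR reported as an event, not a return code\n");

	close(fds[1]);
	return 0;
}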
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index aaf2f9f..52b641f 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -446,6 +446,9 @@ struct fuse_conn {
/** Do multi-page cached writes */
unsigned big_writes:1;
+ /** Don't apply umask to creation modes */
+ unsigned dont_mask:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -481,6 +484,12 @@ struct fuse_conn {
/** Called on final put */
void (*release)(struct fuse_conn *);
+
+ /** Super block for this connection. */
+ struct super_block *sb;
+
+ /** Read/write semaphore to hold when accessing sb. */
+ struct rw_semaphore killsb;
};
static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -509,6 +518,11 @@ extern const struct file_operations fuse_dev_operations;
extern const struct dentry_operations fuse_dentry_operations;
/**
+ * Inode to nodeid comparison.
+ */
+int fuse_inode_eq(struct inode *inode, void *_nodeidp);
+
+/**
* Get a filled in inode
*/
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
@@ -708,6 +722,19 @@ void fuse_release_nowrite(struct inode *inode);
u64 fuse_get_attr_version(struct fuse_conn *fc);
+/**
+ * File-system tells the kernel to invalidate cache for the given node id.
+ */
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ loff_t offset, loff_t len);
+
+/**
+ * File-system tells the kernel to invalidate parent attributes and
+ * the dentry matching parent/name.
+ */
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+ struct qstr *name);
+
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir);
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index d8673cc..f91ccc4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -206,7 +206,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
BUG();
}
-static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
+int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
u64 nodeid = *(u64 *) _nodeidp;
if (get_node_id(inode) == nodeid)
@@ -257,6 +257,31 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
return inode;
}
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ loff_t offset, loff_t len)
+{
+ struct inode *inode;
+ pgoff_t pg_start;
+ pgoff_t pg_end;
+
+ inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
+ if (!inode)
+ return -ENOENT;
+
+ fuse_invalidate_attr(inode);
+ if (offset >= 0) {
+ pg_start = offset >> PAGE_CACHE_SHIFT;
+ if (len <= 0)
+ pg_end = -1;
+ else
+ pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+ invalidate_inode_pages2_range(inode->i_mapping,
+ pg_start, pg_end);
+ }
+ iput(inode);
+ return 0;
+}
+
static void fuse_umount_begin(struct super_block *sb)
{
fuse_abort_conn(get_fuse_conn_super(sb));
@@ -480,6 +505,7 @@ void fuse_conn_init(struct fuse_conn *fc)
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
mutex_init(&fc->inst_mutex);
+ init_rwsem(&fc->killsb);
atomic_set(&fc->count, 1);
init_waitqueue_head(&fc->waitq);
init_waitqueue_head(&fc->blocked_waitq);
@@ -725,6 +751,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
}
if (arg->flags & FUSE_BIG_WRITES)
fc->big_writes = 1;
+ if (arg->flags & FUSE_DONT_MASK)
+ fc->dont_mask = 1;
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -748,7 +776,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->minor = FUSE_KERNEL_MINOR_VERSION;
arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
- FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES;
+ FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -860,10 +888,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fuse_conn_init(fc);
fc->dev = sb->s_dev;
+ fc->sb = sb;
err = fuse_bdi_init(fc, sb);
if (err)
goto err_put_conn;
+ /* Handle umasking inside the fuse code */
+ if (sb->s_flags & MS_POSIXACL)
+ fc->dont_mask = 1;
+ sb->s_flags |= MS_POSIXACL;
+
fc->release = fuse_free_conn;
fc->flags = d.flags;
fc->user_id = d.user_id;
@@ -941,12 +975,25 @@ static int fuse_get_sb(struct file_system_type *fs_type,
return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (fc) {
+ down_write(&fc->killsb);
+ fc->sb = NULL;
+ up_write(&fc->killsb);
+ }
+
+ kill_anon_super(sb);
+}
+
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
.fs_flags = FS_HAS_SUBTYPE,
.get_sb = fuse_get_sb,
- .kill_sb = kill_anon_super,
+ .kill_sb = fuse_kill_sb_anon,
};
#ifdef CONFIG_BLOCK
@@ -958,11 +1005,24 @@ static int fuse_get_sb_blk(struct file_system_type *fs_type,
mnt);
}
+static void fuse_kill_sb_blk(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (fc) {
+ down_write(&fc->killsb);
+ fc->sb = NULL;
+ up_write(&fc->killsb);
+ }
+
+ kill_block_super(sb);
+}
+
static struct file_system_type fuseblk_fs_type = {
.owner = THIS_MODULE,
.name = "fuseblk",
.get_sb = fuse_get_sb_blk,
- .kill_sb = kill_block_super,
+ .kill_sb = fuse_kill_sb_blk,
.fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
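The killsb pattern above is what lets the notify handlers use fc->sb safely: readers take the semaphore shared and re-check the pointer under it; unmount takes it exclusive, NULLs the pointer, and only then tears the superblock down. A pthread rwlock sketch of the same protocol, with invented structure names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	pthread_rwlock_t killsb;
	struct superblock { int id; } *sb;	/* NULL once unmounted */
};

static int notify_handler(struct conn *c)
{
	int err = -2;	/* -ENOENT */

	pthread_rwlock_rdlock(&c->killsb);
	if (c->sb) {			/* re-check under the lock */
		printf("working on sb %d\n", c->sb->id);
		err = 0;
	}
	pthread_rwlock_unlock(&c->killsb);
	return err;
}

static void kill_sb(struct conn *c)
{
	pthread_rwlock_wrlock(&c->killsb);	/* waits out all readers */
	free(c->sb);
	c->sb = NULL;
	pthread_rwlock_unlock(&c->killsb);
}

int main(void)
{
	struct conn c = { PTHREAD_RWLOCK_INITIALIZER, NULL };

	c.sb = malloc(sizeof(*c.sb));
	c.sb->id = 1;
	notify_handler(&c);		/* succeeds */
	kill_sb(&c);
	return notify_handler(&c) ? 0 : 1;	/* now -ENOENT */
}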
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 03ebb43..7ebae9a 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -624,6 +624,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
{
struct gfs2_inode *ip = GFS2_I(mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
int alloc_required;
int error = 0;
@@ -637,6 +638,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
error = gfs2_glock_nq(&ip->i_gh);
if (unlikely(error))
goto out_uninit;
+ if (&ip->i_inode == sdp->sd_rindex) {
+ error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
+ GL_NOCACHE, &m_ip->i_gh);
+ if (unlikely(error)) {
+ gfs2_glock_dq(&ip->i_gh);
+ goto out_uninit;
+ }
+ }
error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
if (error)
@@ -667,6 +676,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
rblocks += data_blocks ? data_blocks : 1;
if (ind_blocks || data_blocks)
rblocks += RES_STATFS + RES_QUOTA;
+ if (&ip->i_inode == sdp->sd_rindex)
+ rblocks += 2 * RES_STATFS;
error = gfs2_trans_begin(sdp, rblocks,
PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -712,6 +723,10 @@ out_alloc_put:
gfs2_alloc_put(ip);
}
out_unlock:
+ if (&ip->i_inode == sdp->sd_rindex) {
+ gfs2_glock_dq(&m_ip->i_gh);
+ gfs2_holder_uninit(&m_ip->i_gh);
+ }
gfs2_glock_dq(&ip->i_gh);
out_uninit:
gfs2_holder_uninit(&ip->i_gh);
@@ -725,14 +740,21 @@ out_uninit:
static void adjust_fs_space(struct inode *inode)
{
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+ struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+ struct buffer_head *m_bh, *l_bh;
u64 fs_total, new_free;
/* Total up the file system space, according to the latest rindex. */
fs_total = gfs2_ri_total(sdp);
+ if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
+ return;
spin_lock(&sdp->sd_statfs_spin);
+ gfs2_statfs_change_in(m_sc, m_bh->b_data +
+ sizeof(struct gfs2_dinode));
if (fs_total > (m_sc->sc_total + l_sc->sc_total))
new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
else
@@ -741,6 +763,13 @@ static void adjust_fs_space(struct inode *inode)
fs_warn(sdp, "File system extended by %llu blocks.\n",
(unsigned long long)new_free);
gfs2_statfs_change(sdp, new_free, new_free, 0);
+
+ if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
+ goto out;
+ update_statfs(sdp, m_bh, l_bh);
+ brelse(l_bh);
+out:
+ brelse(m_bh);
}
/**
@@ -763,6 +792,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
u64 to = pos + copied;
void *kaddr;
unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
@@ -794,6 +824,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
brelse(dibh);
gfs2_trans_end(sdp);
+ if (inode == sdp->sd_rindex) {
+ gfs2_glock_dq(&m_ip->i_gh);
+ gfs2_holder_uninit(&m_ip->i_gh);
+ }
gfs2_glock_dq(&ip->i_gh);
gfs2_holder_uninit(&ip->i_gh);
return copied;
@@ -823,6 +857,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct inode *inode = page->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = ip->i_alloc;
unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
@@ -865,6 +900,10 @@ failed:
gfs2_quota_unlock(ip);
gfs2_alloc_put(ip);
}
+ if (inode == sdp->sd_rindex) {
+ gfs2_glock_dq(&m_ip->i_gh);
+ gfs2_holder_uninit(&m_ip->i_gh);
+ }
gfs2_glock_dq(&ip->i_gh);
gfs2_holder_uninit(&ip->i_gh);
return ret;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 297421c..8b674b1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);
@@ -167,13 +168,33 @@ static void glock_free(struct gfs2_glock *gl)
*
*/
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
{
GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
atomic_inc(&gl->gl_ref);
}
/**
+ * demote_ok - Check to see if it's ok to unlock a glock
+ * @gl: the glock
+ *
+ * Returns: 1 if it's ok
+ */
+
+static int demote_ok(const struct gfs2_glock *gl)
+{
+ const struct gfs2_glock_operations *glops = gl->gl_ops;
+
+ if (gl->gl_state == LM_ST_UNLOCKED)
+ return 0;
+ if (!list_empty(&gl->gl_holders))
+ return 0;
+ if (glops->go_demote_ok)
+ return glops->go_demote_ok(gl);
+ return 1;
+}
+
+/**
* gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
* @gl: the glock
*
@@ -181,8 +202,13 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
+ int may_reclaim;
+ may_reclaim = (demote_ok(gl) &&
+ (atomic_read(&gl->gl_ref) == 1 ||
+ (gl->gl_name.ln_type == LM_TYPE_INODE &&
+ atomic_read(&gl->gl_ref) <= 2)));
spin_lock(&lru_lock);
- if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
+ if (list_empty(&gl->gl_lru) && may_reclaim) {
list_add_tail(&gl->gl_lru, &lru_list);
atomic_inc(&lru_count);
}
@@ -190,6 +216,21 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
}
/**
+ * gfs2_glock_put_nolock() - Decrement reference count on glock
+ * @gl: The glock to put
+ *
+ * This function should only be used if the caller has its own reference
+ * to the glock, in addition to the one it is dropping.
+ */
+
+void gfs2_glock_put_nolock(struct gfs2_glock *gl)
+{
+ if (atomic_dec_and_test(&gl->gl_ref))
+ GLOCK_BUG_ON(gl, 1);
+ gfs2_glock_schedule_for_reclaim(gl);
+}
+
+/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
*
@@ -214,9 +255,9 @@ int gfs2_glock_put(struct gfs2_glock *gl)
rv = 1;
goto out;
}
- /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
- if (atomic_read(&gl->gl_ref) == 2)
- gfs2_glock_schedule_for_reclaim(gl);
+ spin_lock(&gl->gl_spin);
+ gfs2_glock_schedule_for_reclaim(gl);
+ spin_unlock(&gl->gl_spin);
write_unlock(gl_lock_addr(gl->gl_hash));
out:
return rv;
@@ -398,7 +439,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
if (held2)
gfs2_glock_hold(gl);
else
- gfs2_glock_put(gl);
+ gfs2_glock_put_nolock(gl);
}
gl->gl_state = new_state;
@@ -633,12 +674,35 @@ out:
out_sched:
gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put(gl);
+ gfs2_glock_put_nolock(gl);
out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
goto out;
}
+static void delete_work_func(struct work_struct *work)
+{
+ struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct gfs2_inode *ip = NULL;
+ struct inode *inode;
+ u64 no_addr = 0;
+
+ spin_lock(&gl->gl_spin);
+ ip = (struct gfs2_inode *)gl->gl_object;
+ if (ip)
+ no_addr = ip->i_no_addr;
+ spin_unlock(&gl->gl_spin);
+ if (ip) {
+ inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+ if (inode) {
+ d_prune_aliases(inode);
+ iput(inode);
+ }
+ }
+ gfs2_glock_put(gl);
+}
+
static void glock_work_func(struct work_struct *work)
{
unsigned long delay = 0;
@@ -717,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_sbd = sdp;
gl->gl_aspace = NULL;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+ INIT_WORK(&gl->gl_delete, delete_work_func);
/* If this glock protects actual on-disk data or metadata blocks,
create a VFS inode to manage the pages/buffers holding them. */
@@ -858,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
gl->gl_demote_state != state) {
gl->gl_demote_state = LM_ST_UNLOCKED;
}
+ if (gl->gl_ops->go_callback)
+ gl->gl_ops->go_callback(gl);
trace_gfs2_demote_rq(gl);
}
@@ -1274,33 +1341,12 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
gfs2_glock_put(gl);
}
-/**
- * demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int demote_ok(const struct gfs2_glock *gl)
-{
- const struct gfs2_glock_operations *glops = gl->gl_ops;
-
- if (gl->gl_state == LM_ST_UNLOCKED)
- return 0;
- if (!list_empty(&gl->gl_holders))
- return 0;
- if (glops->go_demote_ok)
- return glops->go_demote_ok(gl);
- return 1;
-}
-
static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
{
struct gfs2_glock *gl;
int may_demote;
int nr_skipped = 0;
- int got_ref = 0;
LIST_HEAD(skipped);
if (nr == 0)
@@ -1315,37 +1361,29 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
+ /* Check if glock is about to be freed */
+ if (atomic_read(&gl->gl_ref) == 0)
+ continue;
+
/* Test for being demotable */
if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
gfs2_glock_hold(gl);
- got_ref = 1;
spin_unlock(&lru_lock);
spin_lock(&gl->gl_spin);
may_demote = demote_ok(gl);
- spin_unlock(&gl->gl_spin);
- clear_bit(GLF_LOCK, &gl->gl_flags);
if (may_demote) {
handle_callback(gl, LM_ST_UNLOCKED, 0);
nr--;
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put(gl);
- got_ref = 0;
}
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put_nolock(gl);
+ spin_unlock(&gl->gl_spin);
+ clear_bit(GLF_LOCK, &gl->gl_flags);
spin_lock(&lru_lock);
- if (may_demote)
- continue;
- }
- if (list_empty(&gl->gl_lru) &&
- (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
- nr_skipped++;
- list_add(&gl->gl_lru, &skipped);
- }
- if (got_ref) {
- spin_unlock(&lru_lock);
- gfs2_glock_put(gl);
- spin_lock(&lru_lock);
- got_ref = 0;
+ continue;
}
+ nr_skipped++;
+ list_add(&gl->gl_lru, &skipped);
}
list_splice(&skipped, &lru_list);
atomic_add(nr_skipped, &lru_count);
@@ -1727,6 +1765,11 @@ int __init gfs2_glock_init(void)
glock_workqueue = create_workqueue("glock_workqueue");
if (IS_ERR(glock_workqueue))
return PTR_ERR(glock_workqueue);
+ gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+ if (IS_ERR(gfs2_delete_workqueue)) {
+ destroy_workqueue(glock_workqueue);
+ return PTR_ERR(gfs2_delete_workqueue);
+ }
register_shrinker(&glock_shrinker);
@@ -1737,6 +1780,7 @@ void gfs2_glock_exit(void)
{
unregister_shrinker(&glock_shrinker);
destroy_workqueue(glock_workqueue);
+ destroy_workqueue(gfs2_delete_workqueue);
}
static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
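gfs2_glock_init above now creates a second workqueue and, if that fails, destroys the first before returning, so a half-initialized state never leaks. The classic unwind shape, sketched with plain malloc standing in for the workqueues:

#include <stdio.h>
#include <stdlib.h>

static void *wq_a, *wq_b;

static int subsystem_init(void)
{
	wq_a = malloc(128);	/* stands in for create_workqueue() #1 */
	if (!wq_a)
		return -12;	/* -ENOMEM */

	wq_b = malloc(128);	/* stands in for create_workqueue() #2 */
	if (!wq_b) {
		free(wq_a);	/* unwind what already succeeded */
		wq_a = NULL;
		return -12;
	}
	return 0;
}

int main(void)
{
	int err = subsystem_init();

	printf("init: %d (a=%p b=%p)\n", err, wq_a, wq_b);
	free(wq_a);
	free(wq_b);
	return err ? 1 : 0;
}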
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index a602a28..c609894 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -143,6 +143,7 @@ struct lm_lockops {
#define GLR_TRYFAILED 13
+extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
struct gfs2_holder *gh;
@@ -191,6 +192,8 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
+void gfs2_glock_hold(struct gfs2_glock *gl);
+void gfs2_glock_put_nolock(struct gfs2_glock *gl);
int gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index d5e4ab1..6985eef 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -323,6 +323,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
if (gl->gl_state != LM_ST_UNLOCKED &&
test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ flush_workqueue(gfs2_delete_workqueue);
gfs2_meta_syncfs(sdp);
gfs2_log_shutdown(sdp);
}
@@ -372,6 +373,25 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
return 0;
}
+/**
+ * iopen_go_callback - schedule the dcache entry for the inode to be deleted
+ * @gl: the glock
+ *
+ * The gl_spin lock is held while calling this
+ */
+static void iopen_go_callback(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+
+ if (gl->gl_demote_state == LM_ST_UNLOCKED &&
+ gl->gl_state == LM_ST_SHARED &&
+ ip && test_bit(GIF_USER, &ip->i_flags)) {
+ gfs2_glock_hold(gl);
+ if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ gfs2_glock_put_nolock(gl);
+ }
+}
+
const struct gfs2_glock_operations gfs2_meta_glops = {
.go_type = LM_TYPE_META,
};
@@ -406,6 +426,7 @@ const struct gfs2_glock_operations gfs2_trans_glops = {
const struct gfs2_glock_operations gfs2_iopen_glops = {
.go_type = LM_TYPE_IOPEN,
+ .go_callback = iopen_go_callback,
};
const struct gfs2_glock_operations gfs2_flock_glops = {
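The new go_callback hook is optional: handle_callback() only invokes it when the glock type supplies one, so the other ops tables need no stubs. A sketch of an ops table with an optional member guarded before the call:

#include <stdio.h>

struct glock;

struct glock_ops {
	const char *name;
	void (*callback)(struct glock *gl);	/* optional, may be NULL */
};

struct glock {
	const struct glock_ops *ops;
};

static void iopen_callback(struct glock *gl)
{
	(void)gl;
	printf("iopen: scheduling delete work\n");
}

static const struct glock_ops iopen_ops = { "iopen", iopen_callback };
static const struct glock_ops meta_ops  = { "meta",  NULL };	/* no hook */

static void demote_request(struct glock *gl)
{
	if (gl->ops->callback)		/* guard: the hook is optional */
		gl->ops->callback(gl);
}

int main(void)
{
	struct glock a = { &iopen_ops }, b = { &meta_ops };

	demote_request(&a);	/* fires */
	demote_request(&b);	/* silently skipped */
	return 0;
}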
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 225347f..61801ad 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -159,6 +159,7 @@ struct gfs2_glock_operations {
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
+ void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
const unsigned long go_min_hold_time;
};
@@ -228,6 +229,7 @@ struct gfs2_glock {
struct list_head gl_ail_list;
atomic_t gl_ail_count;
struct delayed_work gl_work;
+ struct work_struct gl_delete;
};
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index daa4ae3..fba7957 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -285,27 +285,19 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
}
tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
- if (count[1] + count[2] != tmp) {
+ if (count[1] != tmp) {
if (gfs2_consist_rgrpd(rgd))
fs_err(sdp, "used data mismatch: %u != %u\n",
count[1], tmp);
return;
}
- if (count[3] != rgd->rd_dinodes) {
+ if (count[2] + count[3] != rgd->rd_dinodes) {
if (gfs2_consist_rgrpd(rgd))
fs_err(sdp, "used metadata mismatch: %u != %u\n",
- count[3], rgd->rd_dinodes);
+ count[2] + count[3], rgd->rd_dinodes);
return;
}
-
- if (count[2] > count[3]) {
- if (gfs2_consist_rgrpd(rgd))
- fs_err(sdp, "unlinked inodes > inodes: %u\n",
- count[2]);
- return;
- }
-
}
static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
@@ -961,7 +953,8 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
* Returns: The inode, if one has been found
*/
-static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
+static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
+ u64 skip)
{
struct inode *inode;
u32 goal = 0, block;
@@ -985,6 +978,8 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
goal++;
if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
continue;
+ if (no_addr == skip)
+ continue;
*last_unlinked = no_addr;
inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
no_addr, -1, 1);
@@ -1104,7 +1099,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
if (try_rgrp_fit(rgd, al))
goto out;
if (rgd->rd_flags & GFS2_RDF_CHECK)
- inode = try_rgrp_unlink(rgd, last_unlinked);
+ inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
if (inode)
@@ -1138,7 +1133,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
if (try_rgrp_fit(rgd, al))
goto out;
if (rgd->rd_flags & GFS2_RDF_CHECK)
- inode = try_rgrp_unlink(rgd, last_unlinked);
+ inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
if (inode)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0a68013..f522bb0 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -353,7 +353,7 @@ fail:
return error;
}
-static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
const struct gfs2_statfs_change *str = buf;
@@ -441,6 +441,29 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
brelse(l_bh);
}
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+ struct buffer_head *l_bh)
+{
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+ struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
+ struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+ struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+
+ gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
+
+ spin_lock(&sdp->sd_statfs_spin);
+ m_sc->sc_total += l_sc->sc_total;
+ m_sc->sc_free += l_sc->sc_free;
+ m_sc->sc_dinodes += l_sc->sc_dinodes;
+ memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
+ memset(l_bh->b_data + sizeof(struct gfs2_dinode),
+ 0, sizeof(struct gfs2_statfs_change));
+ spin_unlock(&sdp->sd_statfs_spin);
+
+ gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
+ gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+}
+
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -477,19 +500,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
if (error)
goto out_bh2;
- gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
-
- spin_lock(&sdp->sd_statfs_spin);
- m_sc->sc_total += l_sc->sc_total;
- m_sc->sc_free += l_sc->sc_free;
- m_sc->sc_dinodes += l_sc->sc_dinodes;
- memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
- memset(l_bh->b_data + sizeof(struct gfs2_dinode),
- 0, sizeof(struct gfs2_statfs_change));
- spin_unlock(&sdp->sd_statfs_spin);
-
- gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
- gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+ update_statfs(sdp, m_bh, l_bh);
gfs2_trans_end(sdp);
@@ -680,6 +691,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
struct gfs2_holder t_gh;
int error;
+ flush_workqueue(gfs2_delete_workqueue);
gfs2_quota_sync(sdp);
gfs2_statfs_sync(sdp);
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index b56413e..22e0417 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -40,6 +40,10 @@ extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
s64 dinodes);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+ const void *buf);
+extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+ struct buffer_head *l_bh);
extern int gfs2_statfs_sync(struct gfs2_sbd *sdp);
extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 98d6ef1..148d55c 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -1,12 +1,11 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gfs2
+
#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H
#include <linux/tracepoint.h>
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM gfs2
-#define TRACE_INCLUDE_FILE trace_gfs2
-
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
@@ -403,5 +402,6 @@ TRACE_EVENT(gfs2_block_alloc,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_gfs2
#include <trace/define_trace.h>
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 6f833dc..f7fcbe4 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -19,6 +19,7 @@
#include <linux/nls.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include "hfs_fs.h"
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 9fc3af0c..c0759fe 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -12,6 +12,7 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include <linux/nls.h>
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fe02ad4..032604e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -972,6 +972,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
/* NULL is printed as <NULL> by sprintf: avoid that. */
if (req_root == NULL)
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 6916c41..8865c94 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -6,6 +6,7 @@
* directory VFS functions
*/
+#include <linux/smp_lock.h>
#include "hpfs_fn.h"
static int hpfs_dir_release(struct inode *inode, struct file *filp)
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 64ab522..3efabff 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -6,6 +6,7 @@
* file VFS functions
*/
+#include <linux/smp_lock.h>
#include "hpfs_fn.h"
#define BLOCKS(size) (((size) + 511) >> 9)
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index c2ea31b..701ca54 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -13,7 +13,6 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include "hpfs.h"
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 39a1bfb..fe703ae 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -6,6 +6,7 @@
* inode VFS functions
*/
+#include <linux/smp_lock.h>
#include "hpfs_fn.h"
void hpfs_init_inode(struct inode *i)
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index b649232..82b9c4b 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -6,6 +6,7 @@
* adding & removing files & directories
*/
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include "hpfs_fn.h"
static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
diff --git a/fs/inode.c b/fs/inode.c
index 901bad1..ae7b67e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -120,12 +120,11 @@ static void wake_up_inode(struct inode *inode)
* These are initializations that need to be done on every inode
* allocation as the fields are not initialised by slab allocation.
*/
-struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+int inode_init_always(struct super_block *sb, struct inode *inode)
{
static const struct address_space_operations empty_aops;
static struct inode_operations empty_iops;
static const struct file_operations empty_fops;
-
struct address_space *const mapping = &inode->i_data;
inode->i_sb = sb;
@@ -152,7 +151,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
inode->dirtied_when = 0;
if (security_inode_alloc(inode))
- goto out_free_inode;
+ goto out;
/* allocate and initialize an i_integrity */
if (ima_inode_alloc(inode))
@@ -198,16 +197,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_fsnotify_mask = 0;
#endif
- return inode;
+ return 0;
out_free_security:
security_inode_free(inode);
-out_free_inode:
- if (inode->i_sb->s_op->destroy_inode)
- inode->i_sb->s_op->destroy_inode(inode);
- else
- kmem_cache_free(inode_cachep, (inode));
- return NULL;
+out:
+ return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
@@ -220,12 +215,21 @@ static struct inode *alloc_inode(struct super_block *sb)
else
inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
- if (inode)
- return inode_init_always(sb, inode);
- return NULL;
+ if (!inode)
+ return NULL;
+
+ if (unlikely(inode_init_always(sb, inode))) {
+ if (inode->i_sb->s_op->destroy_inode)
+ inode->i_sb->s_op->destroy_inode(inode);
+ else
+ kmem_cache_free(inode_cachep, inode);
+ return NULL;
+ }
+
+ return inode;
}
-void destroy_inode(struct inode *inode)
+void __destroy_inode(struct inode *inode)
{
BUG_ON(inode_has_buffers(inode));
ima_inode_free(inode);
@@ -237,13 +241,17 @@ void destroy_inode(struct inode *inode)
if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
posix_acl_release(inode->i_default_acl);
#endif
+}
+EXPORT_SYMBOL(__destroy_inode);
+
+void destroy_inode(struct inode *inode)
+{
+ __destroy_inode(inode);
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
else
kmem_cache_free(inode_cachep, (inode));
}
-EXPORT_SYMBOL(destroy_inode);
-
/*
* These are initializations that only need to be done
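inode_init_always is reduced to reporting success or failure, and the alloc_inode wrapper now owns the freeing policy; destroy_inode is likewise split so filesystems can reuse __destroy_inode from their own destructors. A sketch of the two-phase split, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct obj { int resource; };

static int obj_init(struct obj *o)	/* init only: never frees o */
{
	o->resource = 42;
	return 0;			/* or -ENOMEM on failure */
}

static void obj_teardown(struct obj *o)	/* inverse of obj_init */
{
	o->resource = 0;
}

static struct obj *obj_alloc(void)	/* allocation policy lives here */
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return NULL;
	if (obj_init(o)) {		/* init failed: the allocator frees */
		free(o);
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;
	printf("resource %d\n", o->resource);
	obj_teardown(o);	/* callers can reuse the teardown half alone */
	free(o);
	return 0;
}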
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 58a7963..85f96bc 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -142,6 +142,7 @@ static const struct dentry_operations isofs_dentry_ops[] = {
struct iso9660_options{
unsigned int rock:1;
+ unsigned int joliet:1;
unsigned int cruft:1;
unsigned int hide:1;
unsigned int showassoc:1;
@@ -151,7 +152,6 @@ struct iso9660_options{
unsigned int gid_set:1;
unsigned int utf8:1;
unsigned char map;
- char joliet;
unsigned char check;
unsigned int blocksize;
mode_t fmode;
@@ -632,7 +632,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) {
sec = (struct iso_supplementary_descriptor *)vdp;
if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) {
- if (opt.joliet == 'y') {
+ if (opt.joliet) {
if (sec->escape[2] == 0x40)
joliet_level = 1;
else if (sec->escape[2] == 0x43)
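Turning joliet from a char holding 'y' into a one-bit flag makes the test a plain truth check and packs it alongside the neighbouring option bits. A sketch of an options struct using bitfields:

#include <stdio.h>

struct mount_opts {
	unsigned int rock:1;
	unsigned int joliet:1;		/* was: char joliet == 'y'/'n' */
	unsigned int cruft:1;
};

int main(void)
{
	struct mount_opts opt = { .rock = 1, .joliet = 1 };

	if (opt.joliet)			/* no more  == 'y'  comparison */
		printf("joliet enabled\n");
	printf("sizeof(opts) = %zu\n", sizeof(opt));
	return 0;
}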
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 737f724..f96f850 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -287,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
struct page *new_page;
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
+ journal_t *journal = transaction->t_journal;
/*
* The buffer really shouldn't be locked: only the current committing
@@ -300,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction,
J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+ /* keep subsequent assertions sane */
+ new_bh->b_state = 0;
+ init_buffer(new_bh, NULL, NULL);
+ atomic_set(&new_bh->b_count, 1);
+ new_jh = journal_add_journal_head(new_bh); /* This sleeps */
/*
* If a new transaction has already done a buffer copy-out, then
@@ -361,14 +367,6 @@ repeat:
kunmap_atomic(mapped_data, KM_USER0);
}
- /* keep subsequent assertions sane */
- new_bh->b_state = 0;
- init_buffer(new_bh, NULL, NULL);
- atomic_set(&new_bh->b_count, 1);
- jbd_unlock_bh_state(bh_in);
-
- new_jh = journal_add_journal_head(new_bh); /* This sleeps */
-
set_bh_page(new_bh, new_page, new_offset);
new_jh->b_transaction = NULL;
new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -385,7 +383,11 @@ repeat:
* copying is moved to the transaction's shadow queue.
*/
JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
- journal_file_buffer(jh_in, transaction, BJ_Shadow);
+ spin_lock(&journal->j_list_lock);
+ __journal_file_buffer(jh_in, transaction, BJ_Shadow);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh_in);
+
JBUFFER_TRACE(new_jh, "file as BJ_IO");
journal_file_buffer(new_jh, transaction, BJ_IO);
@@ -848,6 +850,12 @@ static int journal_reset(journal_t *journal)
first = be32_to_cpu(sb->s_first);
last = be32_to_cpu(sb->s_maxlen);
+ if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
+ printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n",
+ first, last);
+ journal_fail_superblock(journal);
+ return -EINVAL;
+ }
journal->j_first = first;
journal->j_last = last;
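journal_reset now refuses a journal whose on-disk first/last block range is shorter than JFS_MIN_JOURNAL_BLOCKS instead of proceeding with nonsense bounds. The check, restated as a standalone sketch with an illustrative minimum:

#include <stdio.h>

#define MIN_JOURNAL_BLOCKS 1024UL	/* in the spirit of JFS_MIN_JOURNAL_BLOCKS */

static int journal_range_ok(unsigned long first, unsigned long last)
{
	/* The usable region is [first, last]; it must hold at least
	 * MIN_JOURNAL_BLOCKS blocks. */
	if (first + MIN_JOURNAL_BLOCKS > last + 1)
		return -22;	/* -EINVAL: journal too short */
	return 0;
}

int main(void)
{
	printf("1..8192 -> %d\n", journal_range_ok(1, 8192));
	printf("1..512  -> %d\n", journal_range_ok(1, 512));
	return 0;
}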
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 73242ba..c03ac11 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -489,34 +489,15 @@ void journal_unlock_updates (journal_t *journal)
wake_up(&journal->j_wait_transaction_locked);
}
-/*
- * Report any unexpected dirty buffers which turn up. Normally those
- * indicate an error, but they can occur if the user is running (say)
- * tune2fs to modify the live filesystem, so we need the option of
- * continuing as gracefully as possible.
- *
- * The caller should already hold the journal lock and
- * j_list_lock spinlock: most callers will need those anyway
- * in order to probe the buffer's journaling state safely.
- */
-static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
+static void warn_dirty_buffer(struct buffer_head *bh)
{
- int jlist;
-
- /* If this buffer is one which might reasonably be dirty
- * --- ie. data, or not part of this journal --- then
- * we're OK to leave it alone, but otherwise we need to
- * move the dirty bit to the journal's own internal
- * JBDDirty bit. */
- jlist = jh->b_jlist;
+ char b[BDEVNAME_SIZE];
- if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
- jlist == BJ_Shadow || jlist == BJ_Forget) {
- struct buffer_head *bh = jh2bh(jh);
-
- if (test_clear_buffer_dirty(bh))
- set_buffer_jbddirty(bh);
- }
+ printk(KERN_WARNING
+ "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
+ "There's a risk of filesystem corruption in case of system "
+ "crash.\n",
+ bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}
/*
@@ -583,14 +564,16 @@ repeat:
if (jh->b_next_transaction)
J_ASSERT_JH(jh, jh->b_next_transaction ==
transaction);
+ warn_dirty_buffer(bh);
}
/*
* In any case we need to clean the dirty flag and we must
* do it under the buffer lock to be sure we don't race
* with running write-out.
*/
- JBUFFER_TRACE(jh, "Unexpected dirty buffer");
- jbd_unexpected_dirty_buffer(jh);
+ JBUFFER_TRACE(jh, "Journalling dirty buffer");
+ clear_buffer_dirty(bh);
+ set_buffer_jbddirty(bh);
}
unlock_buffer(bh);
@@ -826,6 +809,15 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
if (jh->b_transaction == NULL) {
+ /*
+	 * A previous journal_forget() could have left the buffer
+	 * with the jbddirty bit set because it was being committed. When
+	 * the commit finished, we filed the buffer for
+	 * checkpointing and marked it dirty. Now we are reallocating
+	 * the buffer, so the transaction freeing it must have
+	 * committed, and so it's safe to clear the dirty bit.
+ */
+ clear_buffer_dirty(jh2bh(jh));
jh->b_transaction = transaction;
/* first access by this transaction */
@@ -1782,8 +1774,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
if (jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "on running+cp transaction");
+ /*
+	 * We don't want to write the buffer anymore; clear the dirty
+	 * bit so that we don't confuse the checks in
+	 * __journal_file_buffer
+ */
+ clear_buffer_dirty(bh);
__journal_file_buffer(jh, transaction, BJ_Forget);
- clear_buffer_jbddirty(bh);
may_free = 0;
} else {
JBUFFER_TRACE(jh, "on running transaction");
@@ -2041,12 +2038,17 @@ void __journal_file_buffer(struct journal_head *jh,
if (jh->b_transaction && jh->b_jlist == jlist)
return;
- /* The following list of buffer states needs to be consistent
- * with __jbd_unexpected_dirty_buffer()'s handling of dirty
- * state. */
-
if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
jlist == BJ_Shadow || jlist == BJ_Forget) {
+ /*
+		 * For metadata buffers, we track the dirty bit in buffer_jbddirty
+		 * instead of buffer_dirty. We should not see a dirty bit set
+		 * here because we clear it in do_get_write_access, but e.g.
+		 * tune2fs can modify the sb and set the dirty bit at any time,
+		 * so we try to handle that gracefully.
+ */
+ if (buffer_dirty(bh))
+ warn_dirty_buffer(bh);
if (test_clear_buffer_dirty(bh) ||
test_clear_buffer_jbddirty(bh))
was_dirty = 1;
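test_clear_buffer_dirty() atomically reads and clears a flag in one step, so the "was it dirty?" answer and the clearing cannot race with another CPU. A C11 atomics sketch of a test-and-clear bit:

#include <stdatomic.h>
#include <stdio.h>

#define DIRTY_BIT 0x1u

static atomic_uint state = DIRTY_BIT;

/* Atomically clear the bit; return whether it was set beforehand. */
static int test_clear_dirty(atomic_uint *s)
{
	unsigned old = atomic_fetch_and(s, ~DIRTY_BIT);
	return (old & DIRTY_BIT) != 0;
}

int main(void)
{
	printf("first  clear: was dirty = %d\n", test_clear_dirty(&state));
	printf("second clear: was dirty = %d\n", test_clear_dirty(&state));
	return 0;
}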
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 18bfd5d..e378cb3 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -297,6 +297,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
struct jbd2_buffer_trigger_type *triggers;
+ journal_t *journal = transaction->t_journal;
/*
* The buffer really shouldn't be locked: only the current committing
@@ -310,6 +311,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+ /* keep subsequent assertions sane */
+ new_bh->b_state = 0;
+ init_buffer(new_bh, NULL, NULL);
+ atomic_set(&new_bh->b_count, 1);
+ new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
/*
* If a new transaction has already done a buffer copy-out, then
@@ -388,14 +394,6 @@ repeat:
kunmap_atomic(mapped_data, KM_USER0);
}
- /* keep subsequent assertions sane */
- new_bh->b_state = 0;
- init_buffer(new_bh, NULL, NULL);
- atomic_set(&new_bh->b_count, 1);
- jbd_unlock_bh_state(bh_in);
-
- new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
-
set_bh_page(new_bh, new_page, new_offset);
new_jh->b_transaction = NULL;
new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -412,7 +410,11 @@ repeat:
* copying is moved to the transaction's shadow queue.
*/
JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
- jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+ spin_lock(&journal->j_list_lock);
+ __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh_in);
+
JBUFFER_TRACE(new_jh, "file as BJ_IO");
jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
@@ -2410,6 +2412,7 @@ const char *jbd2_dev_to_name(dev_t device)
int i = hash_32(device, CACHE_SIZE_BITS);
char *ret;
struct block_device *bd;
+ static struct devname_cache *new_dev;
rcu_read_lock();
if (devcache[i] && devcache[i]->device == device) {
@@ -2419,20 +2422,20 @@ const char *jbd2_dev_to_name(dev_t device)
}
rcu_read_unlock();
+ new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
+ if (!new_dev)
+ return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
spin_lock(&devname_cache_lock);
if (devcache[i]) {
if (devcache[i]->device == device) {
+ kfree(new_dev);
ret = devcache[i]->devname;
spin_unlock(&devname_cache_lock);
return ret;
}
call_rcu(&devcache[i]->rcu, free_devcache);
}
- devcache[i] = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
- if (!devcache[i]) {
- spin_unlock(&devname_cache_lock);
- return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
- }
+ devcache[i] = new_dev;
devcache[i]->device = device;
bd = bdget(device);
if (bd) {
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 494501e..6213ac7 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -499,34 +499,15 @@ void jbd2_journal_unlock_updates (journal_t *journal)
wake_up(&journal->j_wait_transaction_locked);
}
-/*
- * Report any unexpected dirty buffers which turn up. Normally those
- * indicate an error, but they can occur if the user is running (say)
- * tune2fs to modify the live filesystem, so we need the option of
- * continuing as gracefully as possible. #
- *
- * The caller should already hold the journal lock and
- * j_list_lock spinlock: most callers will need those anyway
- * in order to probe the buffer's journaling state safely.
- */
-static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
+static void warn_dirty_buffer(struct buffer_head *bh)
{
- int jlist;
-
- /* If this buffer is one which might reasonably be dirty
- * --- ie. data, or not part of this journal --- then
- * we're OK to leave it alone, but otherwise we need to
- * move the dirty bit to the journal's own internal
- * JBDDirty bit. */
- jlist = jh->b_jlist;
+ char b[BDEVNAME_SIZE];
- if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
- jlist == BJ_Shadow || jlist == BJ_Forget) {
- struct buffer_head *bh = jh2bh(jh);
-
- if (test_clear_buffer_dirty(bh))
- set_buffer_jbddirty(bh);
- }
+ printk(KERN_WARNING
+ "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
+ "There's a risk of filesystem corruption in case of system "
+ "crash.\n",
+ bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}
/*
@@ -593,14 +574,16 @@ repeat:
if (jh->b_next_transaction)
J_ASSERT_JH(jh, jh->b_next_transaction ==
transaction);
+ warn_dirty_buffer(bh);
}
/*
* In any case we need to clean the dirty flag and we must
* do it under the buffer lock to be sure we don't race
* with running write-out.
*/
- JBUFFER_TRACE(jh, "Unexpected dirty buffer");
- jbd_unexpected_dirty_buffer(jh);
+ JBUFFER_TRACE(jh, "Journalling dirty buffer");
+ clear_buffer_dirty(bh);
+ set_buffer_jbddirty(bh);
}
unlock_buffer(bh);
@@ -843,6 +826,15 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
if (jh->b_transaction == NULL) {
+ /*
+ * A previous jbd2_journal_forget() could have left the buffer
+ * with the jbddirty bit set because it was being committed. When
+ * the commit finished, the buffer was filed for checkpointing and
+ * marked dirty. Now we are reallocating the buffer, so the
+ * transaction freeing it must have committed, and it is safe to
+ * clear the dirty bit.
+ */
+ clear_buffer_dirty(jh2bh(jh));
jh->b_transaction = transaction;
/* first access by this transaction */
@@ -1644,8 +1636,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
if (jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "on running+cp transaction");
+ /*
+ * We don't want to write the buffer anymore, so clear the
+ * dirty bit to avoid confusing the checks in
+ * __jbd2_journal_file_buffer().
+ */
+ clear_buffer_dirty(bh);
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
- clear_buffer_jbddirty(bh);
may_free = 0;
} else {
JBUFFER_TRACE(jh, "on running transaction");
@@ -1896,12 +1893,17 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
if (jh->b_transaction && jh->b_jlist == jlist)
return;
- /* The following list of buffer states needs to be consistent
- * with __jbd_unexpected_dirty_buffer()'s handling of dirty
- * state. */
-
if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
jlist == BJ_Shadow || jlist == BJ_Forget) {
+ /*
+ * For metadata buffers, we track the dirty bit in buffer_jbddirty
+ * instead of buffer_dirty. We should not see the dirty bit set
+ * here because we clear it in do_get_write_access(), but e.g.
+ * tune2fs can modify the sb and set the dirty bit at any time,
+ * so we try to handle that gracefully.
+ */
+ if (buffer_dirty(bh))
+ warn_dirty_buffer(bh);
if (test_clear_buffer_dirty(bh) ||
test_clear_buffer_jbddirty(bh))
was_dirty = 1;
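Both versions of this hunk depend on test_clear_buffer_dirty() and test_clear_buffer_jbddirty() being atomic read-and-clear operations: the caller learns whether the bit was set in the same step that clears it, so no concurrent update can slip in between. A self-contained C11 sketch of such a primitive (bit names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define BH_DIRTY	(1u << 0)
#define BH_JBDDIRTY	(1u << 1)

static int test_clear_bit(atomic_uint *state, unsigned int bit)
{
	/* fetch_and returns the old value, so we learn whether the
	 * bit was set at the same instant we clear it. */
	return (atomic_fetch_and(state, ~bit) & bit) != 0;
}

int main(void)
{
	atomic_uint state = ATOMIC_VAR_INIT(BH_DIRTY);
	int was_dirty = test_clear_bit(&state, BH_DIRTY) ||
			test_clear_bit(&state, BH_JBDDIRTY);

	printf("was_dirty=%d state=%#x\n", was_dirty, (unsigned)state);
	return 0;
}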
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index a024474..b47679b 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -270,19 +270,21 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
D2({
int i=0;
struct jffs2_raw_node_ref *this;
- printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);
+ printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n");
this = ic->nodes;
+ printk(KERN_DEBUG);
while(this) {
- printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
+ printk(KERN_CONT "0x%08x(%d)->",
+ ref_offset(this), ref_flags(this));
if (++i == 5) {
- printk("\n" KERN_DEBUG);
+ printk(KERN_DEBUG);
i=0;
}
this = this->next_in_ino;
}
- printk("\n");
+ printk(KERN_CONT "\n");
});
switch (ic->class) {
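Background for the jffs2 printk fixes above: each printk() call starts a new log record unless the string begins with the KERN_CONT marker, so splicing a level prefix like KERN_DEBUG into the middle of a format string (as the removed lines did) just embeds the marker text mid-line. A toy userspace logger showing the record-versus-continuation convention (purely illustrative; the prefixes are made up):

#include <stdio.h>
#include <string.h>

#define LOG_DEBUG "<7>"
#define LOG_CONT  "<c>"

static void klog(const char *msg)
{
	if (strncmp(msg, LOG_CONT, 3) == 0)
		fputs(msg + 3, stdout);		/* append to current record */
	else if (strncmp(msg, LOG_DEBUG, 3) == 0)
		printf("\nDEBUG: %s", msg + 3);	/* start a new record */
	else
		printf("\n%s", msg);
}

int main(void)
{
	klog(LOG_DEBUG "refs:");
	klog(LOG_CONT " 0x100(1)->");
	klog(LOG_CONT " 0x200(0)->");
	klog(LOG_CONT "\n");
	return 0;
}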
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 5edc2bf..23c9475 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -99,7 +99,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
kunmap(pg);
D2(printk(KERN_DEBUG "readpage finished\n"));
- return 0;
+ return ret;
}
int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 7515e73..696686c 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -130,9 +130,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
if (jffs2_sum_active()) {
s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
if (!s) {
- kfree(flashbuf);
JFFS2_WARNING("Can't allocate memory for summary\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 07a22ca..0035c02 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 91fa3ad..a29c7c3 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -67,10 +67,8 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
acl = posix_acl_from_xattr(value, size);
}
kfree(value);
- if (!IS_ERR(acl)) {
+ if (!IS_ERR(acl))
set_cached_acl(inode, type, acl);
- posix_acl_release(acl);
- }
return acl;
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index f2fdcbc..4336adb 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
+#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 99d737b..7cb076a 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -87,18 +87,6 @@ static unsigned int nlm_hash_address(const struct sockaddr *sap)
return hash & (NLM_HOST_NRHASH - 1);
}
-static void nlm_clear_port(struct sockaddr *sap)
-{
- switch (sap->sa_family) {
- case AF_INET:
- ((struct sockaddr_in *)sap)->sin_port = 0;
- break;
- case AF_INET6:
- ((struct sockaddr_in6 *)sap)->sin6_port = 0;
- break;
- }
-}
-
/*
* Common host lookup routine for server & client
*/
@@ -177,7 +165,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
host->h_addrbuf = nsm->sm_addrbuf;
memcpy(nlm_addr(host), ni->sap, ni->salen);
host->h_addrlen = ni->salen;
- nlm_clear_port(nlm_addr(host));
+ rpc_set_port(nlm_addr(host), 0);
memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
host->h_version = ni->version;
host->h_proto = ni->protocol;
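rpc_set_port() used above replaces the open-coded per-family switch that nlm_clear_port() was; the same helper shape also replaces the nfs_set_port() removed from fs/nfs/internal.h further down in this series. A runnable userspace equivalent, assuming only the standard socket headers:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void set_port(struct sockaddr *sap, unsigned short port)
{
	switch (sap->sa_family) {
	case AF_INET:
		((struct sockaddr_in *)sap)->sin_port = htons(port);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
		break;
	}
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };

	set_port((struct sockaddr *)&sin, 0);	/* "clearing" == setting 0 */
	printf("port=%u\n", ntohs(sin.sin_port));
	return 0;
}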
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 7fce1b5..30c9331 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -61,43 +61,6 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
return (struct sockaddr *)&nsm->sm_addr;
}
-static void nsm_display_ipv4_address(const struct sockaddr *sap, char *buf,
- const size_t len)
-{
- const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
- snprintf(buf, len, "%pI4", &sin->sin_addr.s_addr);
-}
-
-static void nsm_display_ipv6_address(const struct sockaddr *sap, char *buf,
- const size_t len)
-{
- const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
-
- if (ipv6_addr_v4mapped(&sin6->sin6_addr))
- snprintf(buf, len, "%pI4", &sin6->sin6_addr.s6_addr32[3]);
- else if (sin6->sin6_scope_id != 0)
- snprintf(buf, len, "%pI6%%%u", &sin6->sin6_addr,
- sin6->sin6_scope_id);
- else
- snprintf(buf, len, "%pI6", &sin6->sin6_addr);
-}
-
-static void nsm_display_address(const struct sockaddr *sap,
- char *buf, const size_t len)
-{
- switch (sap->sa_family) {
- case AF_INET:
- nsm_display_ipv4_address(sap, buf, len);
- break;
- case AF_INET6:
- nsm_display_ipv6_address(sap, buf, len);
- break;
- default:
- snprintf(buf, len, "unsupported address family");
- break;
- }
-}
-
static struct rpc_clnt *nsm_create(void)
{
struct sockaddr_in sin = {
@@ -307,8 +270,11 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
memcpy(nsm_addr(new), sap, salen);
new->sm_addrlen = salen;
nsm_init_private(new);
- nsm_display_address((const struct sockaddr *)&new->sm_addr,
- new->sm_addrbuf, sizeof(new->sm_addrbuf));
+
+ if (rpc_ntop(nsm_addr(new), new->sm_addrbuf,
+ sizeof(new->sm_addrbuf)) == 0)
+ (void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf),
+ "unsupported address family");
memcpy(new->sm_name, hostname, hostname_len);
new->sm_name[hostname_len] = '\0';
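Similarly, rpc_ntop() absorbs the presentation logic of the removed nsm_display_*() helpers. A userspace approximation built on inet_ntop(), keeping the same "unsupported address family" fallback (v4-mapped and scope-id handling omitted for brevity):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static void display_address(const struct sockaddr *sap, char *buf, size_t len)
{
	const void *src = NULL;

	if (sap->sa_family == AF_INET)
		src = &((const struct sockaddr_in *)sap)->sin_addr;
	else if (sap->sa_family == AF_INET6)
		src = &((const struct sockaddr_in6 *)sap)->sin6_addr;

	if (!src || !inet_ntop(sap->sa_family, src, buf, len))
		snprintf(buf, len, "unsupported address family");
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
	display_address((const struct sockaddr *)&sin, buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}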
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 17250373..bd173a6 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/time.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/in.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 3688e55..e1d28dd 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/time.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/in.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
diff --git a/fs/namei.c b/fs/namei.c
index 5b961eb..f3c5b27 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1761,6 +1761,10 @@ do_last:
goto exit;
}
filp = nameidata_to_filp(&nd, open_flag);
+ if (IS_ERR(filp))
+ ima_counts_put(&nd.path,
+ acc_mode & (MAY_READ | MAY_WRITE |
+ MAY_EXEC));
mnt_drop_write(nd.path.mnt);
if (nd.root.mnt)
path_put(&nd.root);
@@ -1817,6 +1821,9 @@ ok:
goto exit;
}
filp = nameidata_to_filp(&nd, open_flag);
+ if (IS_ERR(filp))
+ ima_counts_put(&nd.path,
+ acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
/*
* It is now safe to drop the mnt write
* because the filp has had a write taken
diff --git a/fs/namespace.c b/fs/namespace.c
index 3dc283f..7230787 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
+#include <linux/nsproxy.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
@@ -315,7 +316,8 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
*/
int mnt_want_write_file(struct file *file)
{
- if (!(file->f_mode & FMODE_WRITE))
+ struct inode *inode = file->f_dentry->d_inode;
+ if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
return mnt_want_write(file->f_path.mnt);
else
return mnt_clone_write(file->f_path.mnt);
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 8451598..da7fda6 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
direct.o pagelist.o proc.o read.o symlink.o unlink.o \
- write.o namespace.o mount_clnt.o
+ write.o namespace.o mount_clnt.o \
+ dns_resolve.o cache_lib.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
new file mode 100644
index 0000000..b4ffd01
--- /dev/null
+++ b/fs/nfs/cache_lib.c
@@ -0,0 +1,140 @@
+/*
+ * linux/fs/nfs/cache_lib.c
+ *
+ * Helper routines for the NFS client caches
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ */
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+
+#include "cache_lib.h"
+
+#define NFS_CACHE_UPCALL_PATHLEN 256
+#define NFS_CACHE_UPCALL_TIMEOUT 15
+
+static char nfs_cache_getent_prog[NFS_CACHE_UPCALL_PATHLEN] =
+ "/sbin/nfs_cache_getent";
+static unsigned long nfs_cache_getent_timeout = NFS_CACHE_UPCALL_TIMEOUT;
+
+module_param_string(cache_getent, nfs_cache_getent_prog,
+ sizeof(nfs_cache_getent_prog), 0600);
+MODULE_PARM_DESC(cache_getent, "Path to the client cache upcall program");
+module_param_named(cache_getent_timeout, nfs_cache_getent_timeout, ulong, 0600);
+MODULE_PARM_DESC(cache_getent_timeout, "Timeout (in seconds) after which "
+ "the cache upcall is assumed to have failed");
+
+int nfs_cache_upcall(struct cache_detail *cd, char *entry_name)
+{
+ static char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ NULL
+ };
+ char *argv[] = {
+ nfs_cache_getent_prog,
+ cd->name,
+ entry_name,
+ NULL
+ };
+ int ret = -EACCES;
+
+ if (nfs_cache_getent_prog[0] == '\0')
+ goto out;
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+ /*
+ * Disable the upcall mechanism if we're getting an ENOENT or
+ * EACCES error. The admin can re-enable it on the fly by using
+ * sysfs to set the 'cache_getent' parameter once the problem
+ * has been fixed.
+ */
+ if (ret == -ENOENT || ret == -EACCES)
+ nfs_cache_getent_prog[0] = '\0';
+out:
+ return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Deferred request handling
+ */
+void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)
+{
+ if (atomic_dec_and_test(&dreq->count))
+ kfree(dreq);
+}
+
+static void nfs_dns_cache_revisit(struct cache_deferred_req *d, int toomany)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = container_of(d, struct nfs_cache_defer_req, deferred_req);
+
+ complete_all(&dreq->completion);
+ nfs_cache_defer_req_put(dreq);
+}
+
+static struct cache_deferred_req *nfs_dns_cache_defer(struct cache_req *req)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = container_of(req, struct nfs_cache_defer_req, req);
+ dreq->deferred_req.revisit = nfs_dns_cache_revisit;
+ atomic_inc(&dreq->count);
+
+ return &dreq->deferred_req;
+}
+
+struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
+ if (dreq) {
+ init_completion(&dreq->completion);
+ atomic_set(&dreq->count, 1);
+ dreq->req.defer = nfs_dns_cache_defer;
+ }
+ return dreq;
+}
+
+int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq)
+{
+ if (wait_for_completion_timeout(&dreq->completion,
+ nfs_cache_getent_timeout * HZ) == 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+int nfs_cache_register(struct cache_detail *cd)
+{
+ struct nameidata nd;
+ struct vfsmount *mnt;
+ int ret;
+
+ mnt = rpc_get_mount();
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+ ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &nd);
+ if (ret)
+ goto err;
+ ret = sunrpc_cache_register_pipefs(nd.path.dentry,
+ cd->name, 0600, cd);
+ path_put(&nd.path);
+ if (!ret)
+ return ret;
+err:
+ rpc_put_mount();
+ return ret;
+}
+
+void nfs_cache_unregister(struct cache_detail *cd)
+{
+ sunrpc_cache_unregister_pipefs(cd);
+ rpc_put_mount();
+}
+
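The upcall helper above disables itself when the program is missing or not executable, since those errors will not go away on retry; the admin re-enables it by rewriting the module parameter. A rough userspace analog of that self-disabling pattern using fork()/execv() (the exit-code convention here is an assumption of the sketch, not kernel behaviour):

#include <errno.h>
#include <sys/wait.h>
#include <unistd.h>

static char helper_prog[256] = "/sbin/nfs_cache_getent";

static int cache_upcall(const char *cache_name, const char *entry)
{
	pid_t pid;
	int status;

	if (helper_prog[0] == '\0')
		return -EACCES;			/* upcalls disabled */

	pid = fork();
	if (pid == 0) {
		char *argv[] = { helper_prog, (char *)cache_name,
				 (char *)entry, NULL };
		execv(helper_prog, argv);
		/* shell convention: 127 = not found, 126 = not runnable */
		_exit(errno == ENOENT ? 127 : 126);
	}
	if (pid < 0 || waitpid(pid, &status, 0) < 0)
		return -1;
	if (WIFEXITED(status) && WEXITSTATUS(status) >= 126) {
		helper_prog[0] = '\0';		/* disable future upcalls */
		return -1;
	}
	return 0;
}

int main(void)
{
	return cache_upcall("dns_resolve", "example.org") ? 1 : 0;
}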
diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h
new file mode 100644
index 0000000..76f856e
--- /dev/null
+++ b/fs/nfs/cache_lib.h
@@ -0,0 +1,27 @@
+/*
+ * Helper routines for the NFS client caches
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/sunrpc/cache.h>
+#include <asm/atomic.h>
+
+/*
+ * Deferred request handling
+ */
+struct nfs_cache_defer_req {
+ struct cache_req req;
+ struct cache_deferred_req deferred_req;
+ struct completion completion;
+ atomic_t count;
+};
+
+extern int nfs_cache_upcall(struct cache_detail *cd, char *entry_name);
+extern struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void);
+extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
+extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
+
+extern int nfs_cache_register(struct cache_detail *cd);
+extern void nfs_cache_unregister(struct cache_detail *cd);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 7f604c7..293fa05 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -43,21 +43,29 @@ static struct svc_program nfs4_callback_program;
unsigned int nfs_callback_set_tcpport;
unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
-static const int nfs_set_port_min = 0;
-static const int nfs_set_port_max = 65535;
+#define NFS_CALLBACK_MAXPORTNR (65535U)
-static int param_set_port(const char *val, struct kernel_param *kp)
+static int param_set_portnr(const char *val, struct kernel_param *kp)
{
- char *endp;
- int num = simple_strtol(val, &endp, 0);
- if (endp == val || *endp || num < nfs_set_port_min || num > nfs_set_port_max)
+ unsigned long num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+ ret = strict_strtoul(val, 0, &num);
+ if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
return -EINVAL;
- *((int *)kp->arg) = num;
+ *((unsigned int *)kp->arg) = num;
return 0;
}
-module_param_call(callback_tcpport, param_set_port, param_get_int,
- &nfs_callback_set_tcpport, 0644);
+static int param_get_portnr(char *buffer, struct kernel_param *kp)
+{
+ return param_get_uint(buffer, kp);
+}
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+
+module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
/*
* This is the NFSv4 callback kernel thread.
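param_set_portnr() above tightens the old simple_strtol() parsing: empty strings, trailing garbage, and values above 65535 are all rejected. The same checks in standalone C, using strtoul() in place of strict_strtoul():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_portnr(const char *val, unsigned int *out)
{
	char *end;
	unsigned long num;

	if (!val || !*val)
		return -EINVAL;
	errno = 0;
	num = strtoul(val, &end, 0);
	if (errno || *end != '\0' || num > 65535)
		return -EINVAL;
	*out = (unsigned int)num;
	return 0;
}

int main(void)
{
	unsigned int port;

	printf("%d\n", parse_portnr("2049", &port));	/* 0, port = 2049 */
	printf("%d\n", parse_portnr("65536", &port));	/* -EINVAL: range */
	printf("%d\n", parse_portnr("20x", &port));	/* -EINVAL: junk */
	return 0;
}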
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c2d0616..d36925f 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -809,6 +809,9 @@ static int nfs_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
server->options = data->options;
+ server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
+ NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
@@ -1074,10 +1077,6 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
spin_lock(&nfs_client_lock);
list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
list_add_tail(&server->master_link, &nfs_volume_list);
@@ -1242,20 +1241,6 @@ error:
return error;
}
-/*
- * Initialize a session.
- * Note: save the mount rsize and wsize for create_server negotiation.
- */
-static void nfs4_init_session(struct nfs_client *clp,
- unsigned int wsize, unsigned int rsize)
-{
-#if defined(CONFIG_NFS_V4_1)
- if (nfs4_has_session(clp)) {
- clp->cl_session->fc_attrs.max_rqst_sz = wsize;
- clp->cl_session->fc_attrs.max_resp_sz = rsize;
- }
-#endif /* CONFIG_NFS_V4_1 */
-}
/*
* Session has been established, and the client marked ready.
@@ -1288,7 +1273,7 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
server->options = data->options;
/* Get a client record */
@@ -1350,7 +1335,9 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
- nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
+ error = nfs4_init_session(server);
+ if (error < 0)
+ goto error;
/* Probe the root fh to retrieve its FSID */
error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
@@ -1371,10 +1358,6 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
server->namelen = NFS4_MAXNAMLEN;
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
spin_lock(&nfs_client_lock);
list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
list_add_tail(&server->master_link, &nfs_volume_list);
@@ -1412,7 +1395,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
/* Initialise the client representation from the parent server */
nfs_server_copy_userdata(server, parent_server);
- server->caps |= NFS_CAP_ATOMIC_OPEN;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
/* Get a client representation.
* Note: NFSv4 always uses TCP, */
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index af05b91..6dd48a4 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -10,6 +10,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/nfs4.h>
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 89f98e9..32062c3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -29,7 +29,6 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/pagevec.h>
#include <linux/namei.h>
#include <linux/mount.h>
@@ -1026,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
res = NULL;
goto out;
/* This turned out not to be a regular file */
- case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
+ /* case -EISDIR: */
/* case -EINVAL: */
default:
goto out;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 489fc01..6c32100 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -255,7 +255,7 @@ static void nfs_direct_read_release(void *calldata)
if (put_dreq(dreq))
nfs_direct_complete(dreq);
- nfs_readdata_release(calldata);
+ nfs_readdata_free(data);
}
static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -314,14 +314,14 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->npages, 1, 0, data->pagevec, NULL);
up_read(&current->mm->mmap_sem);
if (result < 0) {
- nfs_readdata_release(data);
+ nfs_readdata_free(data);
break;
}
if ((unsigned)result < data->npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
nfs_direct_release_pages(data->pagevec, result);
- nfs_readdata_release(data);
+ nfs_readdata_free(data);
break;
}
bytes -= pgbase;
@@ -334,7 +334,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
- data->args.context = get_nfs_open_context(ctx);
+ data->args.context = ctx;
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
@@ -441,7 +441,7 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
list_del(&data->pages);
nfs_direct_release_pages(data->pagevec, data->npages);
- nfs_writedata_release(data);
+ nfs_writedata_free(data);
}
}
@@ -534,7 +534,7 @@ static void nfs_direct_commit_release(void *calldata)
dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
nfs_direct_write_complete(dreq, data->inode);
- nfs_commitdata_release(calldata);
+ nfs_commit_free(data);
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
@@ -570,7 +570,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->args.fh = NFS_FH(data->inode);
data->args.offset = 0;
data->args.count = 0;
- data->args.context = get_nfs_open_context(dreq->ctx);
+ data->args.context = dreq->ctx;
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
@@ -734,14 +734,14 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->npages, 0, 0, data->pagevec, NULL);
up_read(&current->mm->mmap_sem);
if (result < 0) {
- nfs_writedata_release(data);
+ nfs_writedata_free(data);
break;
}
if ((unsigned)result < data->npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
nfs_direct_release_pages(data->pagevec, result);
- nfs_writedata_release(data);
+ nfs_writedata_free(data);
break;
}
bytes -= pgbase;
@@ -756,7 +756,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
- data->args.context = get_nfs_open_context(ctx);
+ data->args.context = ctx;
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
@@ -934,9 +934,6 @@ out:
* back into its cache. We let the server do generic write
* parameter checking and report problems.
*
- * We also avoid an unnecessary invocation of generic_osync_inode(),
- * as it is fairly meaningless to sync the metadata of an NFS file.
- *
* We eliminate local atime updates, see direct read above.
*
* We avoid unnecessary page cache invalidations for normal cached
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
new file mode 100644
index 0000000..f4d54ba
--- /dev/null
+++ b/fs/nfs/dns_resolve.c
@@ -0,0 +1,335 @@
+/*
+ * linux/fs/nfs/dns_resolve.c
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ *
+ * Resolves DNS hostnames into valid ip addresses
+ */
+
+#include <linux/hash.h>
+#include <linux/string.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/seq_file.h>
+#include <linux/inet.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/svcauth.h>
+
+#include "dns_resolve.h"
+#include "cache_lib.h"
+
+#define NFS_DNS_HASHBITS 4
+#define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS)
+
+static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE];
+
+struct nfs_dns_ent {
+ struct cache_head h;
+
+ char *hostname;
+ size_t namelen;
+
+ struct sockaddr_storage addr;
+ size_t addrlen;
+};
+
+
+static void nfs_dns_ent_init(struct cache_head *cnew,
+ struct cache_head *ckey)
+{
+ struct nfs_dns_ent *new;
+ struct nfs_dns_ent *key;
+
+ new = container_of(cnew, struct nfs_dns_ent, h);
+ key = container_of(ckey, struct nfs_dns_ent, h);
+
+ kfree(new->hostname);
+ new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
+ if (new->hostname) {
+ new->namelen = key->namelen;
+ memcpy(&new->addr, &key->addr, key->addrlen);
+ new->addrlen = key->addrlen;
+ } else {
+ new->namelen = 0;
+ new->addrlen = 0;
+ }
+}
+
+static void nfs_dns_ent_put(struct kref *ref)
+{
+ struct nfs_dns_ent *item;
+
+ item = container_of(ref, struct nfs_dns_ent, h.ref);
+ kfree(item->hostname);
+ kfree(item);
+}
+
+static struct cache_head *nfs_dns_ent_alloc(void)
+{
+ struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);
+
+ if (item != NULL) {
+ item->hostname = NULL;
+ item->namelen = 0;
+ item->addrlen = 0;
+ return &item->h;
+ }
+ return NULL;
+};
+
+static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key)
+{
+ return hash_str(key->hostname, NFS_DNS_HASHBITS);
+}
+
+static void nfs_dns_request(struct cache_detail *cd,
+ struct cache_head *ch,
+ char **bpp, int *blen)
+{
+ struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
+
+ qword_add(bpp, blen, key->hostname);
+ (*bpp)[-1] = '\n';
+}
+
+static int nfs_dns_upcall(struct cache_detail *cd,
+ struct cache_head *ch)
+{
+ struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
+ int ret;
+
+ ret = nfs_cache_upcall(cd, key->hostname);
+ if (ret)
+ ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
+ return ret;
+}
+
+static int nfs_dns_match(struct cache_head *ca,
+ struct cache_head *cb)
+{
+ struct nfs_dns_ent *a;
+ struct nfs_dns_ent *b;
+
+ a = container_of(ca, struct nfs_dns_ent, h);
+ b = container_of(cb, struct nfs_dns_ent, h);
+
+ if (a->namelen == 0 || a->namelen != b->namelen)
+ return 0;
+ return memcmp(a->hostname, b->hostname, a->namelen) == 0;
+}
+
+static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
+ struct cache_head *h)
+{
+ struct nfs_dns_ent *item;
+ long ttl;
+
+ if (h == NULL) {
+ seq_puts(m, "# ip address hostname ttl\n");
+ return 0;
+ }
+ item = container_of(h, struct nfs_dns_ent, h);
+ ttl = (long)item->h.expiry_time - (long)get_seconds();
+ if (ttl < 0)
+ ttl = 0;
+
+ if (!test_bit(CACHE_NEGATIVE, &h->flags)) {
+ char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1];
+
+ rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf));
+ seq_printf(m, "%15s ", buf);
+ } else
+ seq_puts(m, "<none> ");
+ seq_printf(m, "%15s %ld\n", item->hostname, ttl);
+ return 0;
+}
+
+struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
+ struct nfs_dns_ent *key)
+{
+ struct cache_head *ch;
+
+ ch = sunrpc_cache_lookup(cd,
+ &key->h,
+ nfs_dns_hash(key));
+ if (!ch)
+ return NULL;
+ return container_of(ch, struct nfs_dns_ent, h);
+}
+
+struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
+ struct nfs_dns_ent *new,
+ struct nfs_dns_ent *key)
+{
+ struct cache_head *ch;
+
+ ch = sunrpc_cache_update(cd,
+ &new->h, &key->h,
+ nfs_dns_hash(key));
+ if (!ch)
+ return NULL;
+ return container_of(ch, struct nfs_dns_ent, h);
+}
+
+static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
+{
+ char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
+ struct nfs_dns_ent key, *item;
+ unsigned long ttl;
+ ssize_t len;
+ int ret = -EINVAL;
+
+ if (buf[buflen-1] != '\n')
+ goto out;
+ buf[buflen-1] = '\0';
+
+ len = qword_get(&buf, buf1, sizeof(buf1));
+ if (len <= 0)
+ goto out;
+ key.addrlen = rpc_pton(buf1, len,
+ (struct sockaddr *)&key.addr,
+ sizeof(key.addr));
+
+ len = qword_get(&buf, buf1, sizeof(buf1));
+ if (len <= 0)
+ goto out;
+
+ key.hostname = buf1;
+ key.namelen = len;
+ memset(&key.h, 0, sizeof(key.h));
+
+ ttl = get_expiry(&buf);
+ if (ttl == 0)
+ goto out;
+ key.h.expiry_time = ttl + get_seconds();
+
+ ret = -ENOMEM;
+ item = nfs_dns_lookup(cd, &key);
+ if (item == NULL)
+ goto out;
+
+ if (key.addrlen == 0)
+ set_bit(CACHE_NEGATIVE, &key.h.flags);
+
+ item = nfs_dns_update(cd, &key, item);
+ if (item == NULL)
+ goto out;
+
+ ret = 0;
+ cache_put(&item->h, cd);
+out:
+ return ret;
+}
+
+static struct cache_detail nfs_dns_resolve = {
+ .owner = THIS_MODULE,
+ .hash_size = NFS_DNS_HASHTBL_SIZE,
+ .hash_table = nfs_dns_table,
+ .name = "dns_resolve",
+ .cache_put = nfs_dns_ent_put,
+ .cache_upcall = nfs_dns_upcall,
+ .cache_parse = nfs_dns_parse,
+ .cache_show = nfs_dns_show,
+ .match = nfs_dns_match,
+ .init = nfs_dns_ent_init,
+ .update = nfs_dns_ent_init,
+ .alloc = nfs_dns_ent_alloc,
+};
+
+static int do_cache_lookup(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item,
+ struct nfs_cache_defer_req *dreq)
+{
+ int ret = -ENOMEM;
+
+ *item = nfs_dns_lookup(cd, key);
+ if (*item) {
+ ret = cache_check(cd, &(*item)->h, &dreq->req);
+ if (ret)
+ *item = NULL;
+ }
+ return ret;
+}
+
+static int do_cache_lookup_nowait(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item)
+{
+ int ret = -ENOMEM;
+
+ *item = nfs_dns_lookup(cd, key);
+ if (!*item)
+ goto out_err;
+ ret = -ETIMEDOUT;
+ if (!test_bit(CACHE_VALID, &(*item)->h.flags)
+ || (*item)->h.expiry_time < get_seconds()
+ || cd->flush_time > (*item)->h.last_refresh)
+ goto out_put;
+ ret = -ENOENT;
+ if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
+ goto out_put;
+ return 0;
+out_put:
+ cache_put(&(*item)->h, cd);
+out_err:
+ *item = NULL;
+ return ret;
+}
+
+static int do_cache_lookup_wait(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item)
+{
+ struct nfs_cache_defer_req *dreq;
+ int ret = -ENOMEM;
+
+ dreq = nfs_cache_defer_req_alloc();
+ if (!dreq)
+ goto out;
+ ret = do_cache_lookup(cd, key, item, dreq);
+ if (ret == -EAGAIN) {
+ ret = nfs_cache_wait_for_upcall(dreq);
+ if (!ret)
+ ret = do_cache_lookup_nowait(cd, key, item);
+ }
+ nfs_cache_defer_req_put(dreq);
+out:
+ return ret;
+}
+
+ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
+ struct sockaddr *sa, size_t salen)
+{
+ struct nfs_dns_ent key = {
+ .hostname = name,
+ .namelen = namelen,
+ };
+ struct nfs_dns_ent *item = NULL;
+ ssize_t ret;
+
+ ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item);
+ if (ret == 0) {
+ if (salen >= item->addrlen) {
+ memcpy(sa, &item->addr, item->addrlen);
+ ret = item->addrlen;
+ } else
+ ret = -EOVERFLOW;
+ cache_put(&item->h, &nfs_dns_resolve);
+ } else if (ret == -ENOENT)
+ ret = -ESRCH;
+ return ret;
+}
+
+int nfs_dns_resolver_init(void)
+{
+ return nfs_cache_register(&nfs_dns_resolve);
+}
+
+void nfs_dns_resolver_destroy(void)
+{
+ nfs_cache_unregister(&nfs_dns_resolve);
+}
+
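The cache code above returns -EAGAIN while an upcall is pending, and do_cache_lookup_wait() then parks the caller on a completion that nfs_cache_wait_for_upcall() bounds with a timeout. A userspace analog of that completion-with-timeout, built from a mutex and condition variable (illustrative only):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static int wait_for_completion_timeout(struct completion *c, int secs)
{
	struct timespec ts;
	int err = 0, done;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&c->lock);
	while (!c->done && err == 0)
		err = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
	done = c->done;
	pthread_mutex_unlock(&c->lock);
	return done ? 0 : -ETIMEDOUT;
}

static void complete_all(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct completion c = { PTHREAD_MUTEX_INITIALIZER,
				PTHREAD_COND_INITIALIZER, 0 };

	complete_all(&c);	/* pretend the resolver answered at once */
	printf("%d\n", wait_for_completion_timeout(&c, 15));	/* 0 */
	return 0;
}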
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
new file mode 100644
index 0000000..a3f0938
--- /dev/null
+++ b/fs/nfs/dns_resolve.h
@@ -0,0 +1,14 @@
+/*
+ * Resolve DNS hostnames into valid ip addresses
+ */
+#ifndef __LINUX_FS_NFS_DNS_RESOLVE_H
+#define __LINUX_FS_NFS_DNS_RESOLVE_H
+
+#define NFS_DNS_HOSTNAME_MAXLEN (128)
+
+extern int nfs_dns_resolver_init(void);
+extern void nfs_dns_resolver_destroy(void);
+extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
+ struct sockaddr *sa, size_t salen);
+
+#endif
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 0055b81..5021b75 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/aio.h>
#include <asm/uaccess.h>
@@ -329,6 +328,42 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
}
/*
+ * Decide whether a read/modify/write cycle may be more efficient
+ * than a modify/write/read cycle when writing to a page in the
+ * page cache.
+ *
+ * The modify/write/read cycle may occur if a page is read before
+ * being completely filled by the writer. In this situation, the
+ * page must be completely written to stable storage on the server
+ * before it can be refilled by reading in the page from the server.
+ * This can lead to expensive, small, FILE_SYNC mode writes being
+ * done.
+ *
+ * It may be more efficient to read the page first if the file is
+ * open for reading in addition to writing, the page is not marked
+ * Uptodate, it is neither dirty nor waiting to be committed
+ * (indicating that it was previously allocated and then modified),
+ * there are valid bytes of data in that range of the file, and
+ * the new data won't completely replace the old data in that
+ * range of the file.
+ */
+static int nfs_want_read_modify_write(struct file *file, struct page *page,
+ loff_t pos, unsigned len)
+{
+ unsigned int pglen = nfs_page_length(page);
+ unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int end = offset + len;
+
+ if ((file->f_mode & FMODE_READ) && /* open for read? */
+ !PageUptodate(page) && /* Uptodate? */
+ !PagePrivate(page) && /* i/o request already? */
+ pglen && /* valid bytes of file? */
+ (end < pglen || offset)) /* replace all valid bytes? */
+ return 1;
+ return 0;
+}
+
+/*
* This does the "real" work of the write. We must allocate and lock the
* page to be sent back to the generic routine, which then copies the
* data from user space.
@@ -341,15 +376,16 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
int ret;
- pgoff_t index;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct page *page;
- index = pos >> PAGE_CACHE_SHIFT;
+ int once_thru = 0;
dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
file->f_path.dentry->d_parent->d_name.name,
file->f_path.dentry->d_name.name,
mapping->host->i_ino, len, (long long) pos);
+start:
/*
* Prevent starvation issues if someone is doing a consistency
* sync-to-disk
@@ -368,6 +404,13 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
if (ret) {
unlock_page(page);
page_cache_release(page);
+ } else if (!once_thru &&
+ nfs_want_read_modify_write(file, page, pos, len)) {
+ once_thru = 1;
+ ret = nfs_readpage(file, page);
+ page_cache_release(page);
+ if (!ret)
+ goto start;
}
return ret;
}
@@ -480,6 +523,7 @@ const struct address_space_operations nfs_file_aops = {
.invalidatepage = nfs_invalidate_page,
.releasepage = nfs_release_page,
.direct_IO = nfs_direct_IO,
+ .migratepage = nfs_migrate_page,
.launder_page = nfs_launder_page,
};
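The heuristic in nfs_want_read_modify_write() is easier to see with the page-cache details stripped away. A standalone restatement of the same predicate over plain values, with PAGE_SIZE fixed at 4096 for the example:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static bool want_read_first(bool open_for_read, bool uptodate,
			    bool has_io_request, unsigned int pglen,
			    unsigned long long pos, unsigned int len)
{
	unsigned int offset = pos & (PAGE_SIZE - 1);
	unsigned int end = offset + len;

	return open_for_read &&		/* a reader may fault it in anyway */
	       !uptodate &&		/* page content not already valid */
	       !has_io_request &&	/* no write already queued for it */
	       pglen &&			/* file has valid bytes in the page */
	       (end < pglen || offset);	/* write won't cover all of them */
}

int main(void)
{
	/* Partial overwrite of a full page: reading first wins. */
	printf("%d\n", want_read_first(true, false, false, PAGE_SIZE, 0, 100));
	/* Write replaces every valid byte: no point reading first. */
	printf("%d\n", want_read_first(true, false, false, PAGE_SIZE, 0,
				       PAGE_SIZE));
	return 0;
}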
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 46177cb..b35d2a6 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -30,7 +30,6 @@
#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/namei.h>
-#include <linux/mnt_namespace.h>
#include <linux/security.h>
#include <asm/system.h>
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 86147b0..21a84d45 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -101,7 +101,7 @@ static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
static unsigned int fnvhash32(const void *, size_t);
-static struct rpc_pipe_ops idmap_upcall_ops = {
+static const struct rpc_pipe_ops idmap_upcall_ops = {
.upcall = idmap_pipe_upcall,
.downcall = idmap_pipe_downcall,
.destroy_msg = idmap_pipe_destroy_msg,
@@ -119,8 +119,8 @@ nfs_idmap_new(struct nfs_client *clp)
if (idmap == NULL)
return -ENOMEM;
- idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_dentry, "idmap",
- idmap, &idmap_upcall_ops, 0);
+ idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_path.dentry,
+ "idmap", idmap, &idmap_upcall_ops, 0);
if (IS_ERR(idmap->idmap_dentry)) {
error = PTR_ERR(idmap->idmap_dentry);
kfree(idmap);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 64f87194d..060022b 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -30,7 +30,6 @@
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/lockd/bind.h>
-#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/nfs_idmap.h>
@@ -47,6 +46,7 @@
#include "iostat.h"
#include "internal.h"
#include "fscache.h"
+#include "dns_resolve.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -287,6 +287,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
/* We can't support update_atime(), since the server will reset it */
inode->i_flags |= S_NOATIME|S_NOCMTIME;
inode->i_mode = fattr->mode;
+ if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
+ && nfs_server_capable(inode, NFS_CAP_MODE))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
/* Why so? Because we want revalidate for devices/FIFOs, and
* that's precisely what we have in nfs_file_inode_operations.
*/
@@ -331,20 +336,46 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
nfsi->attr_gencount = fattr->gencount;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
inode->i_atime = fattr->atime;
+ else if (nfs_server_capable(inode, NFS_CAP_ATIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
inode->i_mtime = fattr->mtime;
+ else if (nfs_server_capable(inode, NFS_CAP_MTIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
inode->i_ctime = fattr->ctime;
+ else if (nfs_server_capable(inode, NFS_CAP_CTIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
nfsi->change_attr = fattr->change_attr;
+ else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA;
if (fattr->valid & NFS_ATTR_FATTR_SIZE)
inode->i_size = nfs_size_to_loff_t(fattr->size);
+ else
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_REVAL_PAGECACHE;
if (fattr->valid & NFS_ATTR_FATTR_NLINK)
inode->i_nlink = fattr->nlink;
+ else if (nfs_server_capable(inode, NFS_CAP_NLINK))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_GROUP)
inode->i_gid = fattr->gid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -1146,6 +1177,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
loff_t cur_isize, new_isize;
unsigned long invalid = 0;
unsigned long now = jiffies;
+ unsigned long save_cache_validity;
dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
__func__, inode->i_sb->s_id, inode->i_ino,
@@ -1172,10 +1204,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
nfsi->read_cache_jiffies = fattr->time_start;
- if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) || (fattr->valid & (NFS_ATTR_FATTR_MTIME|NFS_ATTR_FATTR_CTIME)))
- nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ATIME
- | NFS_INO_REVAL_PAGECACHE);
+ save_cache_validity = nfsi->cache_validity;
+ nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ATIME
+ | NFS_INO_REVAL_FORCED
+ | NFS_INO_REVAL_PAGECACHE);
/* Do atomic weak cache consistency updates */
nfs_wcc_update_inode(inode, fattr);
@@ -1190,7 +1223,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_force_lookup_revalidate(inode);
nfsi->change_attr = fattr->change_attr;
}
- }
+ } else if (server->caps & NFS_CAP_CHANGE_ATTR)
+ invalid |= save_cache_validity;
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
/* NFSv2/v3: Check if the mtime agrees */
@@ -1202,7 +1236,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_force_lookup_revalidate(inode);
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
}
- }
+ } else if (server->caps & NFS_CAP_MTIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
+
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
/* If ctime has changed we should definitely clear access+acl caches */
if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
@@ -1216,7 +1255,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
}
- }
+ } else if (server->caps & NFS_CAP_CTIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
@@ -1232,30 +1275,50 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
dprintk("NFS: isize change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
}
- }
+ } else
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+ else if (server->caps & NFS_CAP_ATIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_mode = fattr->mode;
}
- }
+ } else if (server->caps & NFS_CAP_MODE)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
if (inode->i_uid != fattr->uid) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_uid = fattr->uid;
}
- }
+ } else if (server->caps & NFS_CAP_OWNER)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
if (inode->i_gid != fattr->gid) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_gid = fattr->gid;
}
- }
+ } else if (server->caps & NFS_CAP_OWNER_GROUP)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
if (inode->i_nlink != fattr->nlink) {
@@ -1264,7 +1327,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
invalid |= NFS_INO_INVALID_DATA;
inode->i_nlink = fattr->nlink;
}
- }
+ } else if (server->caps & NFS_CAP_NLINK)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
/*
@@ -1294,9 +1359,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|| S_ISLNK(inode->i_mode)))
invalid &= ~NFS_INO_INVALID_DATA;
if (!nfs_have_delegation(inode, FMODE_READ) ||
- (nfsi->cache_validity & NFS_INO_REVAL_FORCED))
+ (save_cache_validity & NFS_INO_REVAL_FORCED))
nfsi->cache_validity |= invalid;
- nfsi->cache_validity &= ~NFS_INO_REVAL_FORCED;
return 0;
out_changed:
@@ -1443,6 +1507,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfs_dns_resolver_init();
+ if (err < 0)
+ goto out8;
+
err = nfs_fscache_register();
if (err < 0)
goto out7;
@@ -1501,6 +1569,8 @@ out5:
out6:
nfs_fscache_unregister();
out7:
+ nfs_dns_resolver_destroy();
+out8:
return err;
}
@@ -1512,6 +1582,7 @@ static void __exit exit_nfs_fs(void)
nfs_destroy_inodecache();
nfs_destroy_nfspagecache();
nfs_fscache_unregister();
+ nfs_dns_resolver_destroy();
#ifdef CONFIG_PROC_FS
rpc_proc_unregister("nfs");
#endif
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7dd90a6..2e48567 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -102,6 +102,7 @@ struct nfs_mount_request {
};
extern int nfs_mount(struct nfs_mount_request *info);
+extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern struct rpc_program nfs_program;
@@ -213,7 +214,6 @@ void nfs_zap_acl_cache(struct inode *inode);
extern int nfs_wait_bit_killable(void *word);
/* super.c */
-void nfs_parse_ip_address(char *, size_t, struct sockaddr *, size_t *);
extern struct file_system_type nfs_xdev_fs_type;
#ifdef CONFIG_NFS_V4
extern struct file_system_type nfs4_xdev_fs_type;
@@ -248,6 +248,12 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
/* write.c */
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+#ifdef CONFIG_MIGRATION
+extern int nfs_migrate_page(struct address_space *,
+ struct page *, struct page *);
+#else
+#define nfs_migrate_page NULL
+#endif
/* nfs4proc.c */
extern int _nfs4_call_sync(struct nfs_server *server,
@@ -368,24 +374,3 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
return ((unsigned long)len + (unsigned long)base +
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
-
-#define IPV6_SCOPE_DELIMITER '%'
-
-/*
- * Set the port number in an address. Be agnostic about the address
- * family.
- */
-static inline void nfs_set_port(struct sockaddr *sap, unsigned short port)
-{
- struct sockaddr_in *ap = (struct sockaddr_in *)sap;
- struct sockaddr_in6 *ap6 = (struct sockaddr_in6 *)sap;
-
- switch (sap->sa_family) {
- case AF_INET:
- ap->sin_port = htons(port);
- break;
- case AF_INET6:
- ap6->sin6_port = htons(port);
- break;
- }
-}
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 38ef9ea..0adefc4 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -209,6 +209,71 @@ out_mnt_err:
goto out;
}
+/**
+ * nfs_umount - Notify a server that we have unmounted this export
+ * @info: pointer to umount request arguments
+ *
+ * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always
+ * use UDP.
+ */
+void nfs_umount(const struct nfs_mount_request *info)
+{
+ static const struct rpc_timeout nfs_umnt_timeout = {
+ .to_initval = 1 * HZ,
+ .to_maxval = 3 * HZ,
+ .to_retries = 2,
+ };
+ struct rpc_create_args args = {
+ .protocol = IPPROTO_UDP,
+ .address = info->sap,
+ .addrsize = info->salen,
+ .timeout = &nfs_umnt_timeout,
+ .servername = info->hostname,
+ .program = &mnt_program,
+ .version = info->version,
+ .authflavor = RPC_AUTH_UNIX,
+ .flags = RPC_CLNT_CREATE_NOPING,
+ };
+ struct mountres result;
+ struct rpc_message msg = {
+ .rpc_argp = info->dirpath,
+ .rpc_resp = &result,
+ };
+ struct rpc_clnt *clnt;
+ int status;
+
+ if (info->noresvport)
+ args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+
+ clnt = rpc_create(&args);
+ if (unlikely(IS_ERR(clnt)))
+ goto out_clnt_err;
+
+ dprintk("NFS: sending UMNT request for %s:%s\n",
+ (info->hostname ? info->hostname : "server"), info->dirpath);
+
+ if (info->version == NFS_MNT3_VERSION)
+ msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT];
+ else
+ msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT];
+
+ status = rpc_call_sync(clnt, &msg, 0);
+ rpc_shutdown_client(clnt);
+
+ if (unlikely(status < 0))
+ goto out_call_err;
+
+ return;
+
+out_clnt_err:
+ dprintk("NFS: failed to create UMNT RPC client, status=%ld\n",
+ PTR_ERR(clnt));
+ return;
+
+out_call_err:
+ dprintk("NFS: UMNT request failed, status=%d\n", status);
+}
+
/*
* XDR encode/decode functions for MOUNT
*/
@@ -258,7 +323,7 @@ static int decode_status(struct xdr_stream *xdr, struct mountres *res)
return -EIO;
status = ntohl(*p);
- for (i = 0; i <= ARRAY_SIZE(mnt_errtbl); i++) {
+ for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
if (mnt_errtbl[i].status == status) {
res->errno = mnt_errtbl[i].errno;
return 0;
@@ -309,7 +374,7 @@ static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
return -EIO;
status = ntohl(*p);
- for (i = 0; i <= ARRAY_SIZE(mnt3_errtbl); i++) {
+ for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
if (mnt3_errtbl[i].status == status) {
res->errno = mnt3_errtbl[i].errno;
return 0;
@@ -407,6 +472,13 @@ static struct rpc_procinfo mnt_procedures[] = {
.p_statidx = MOUNTPROC_MNT,
.p_name = "MOUNT",
},
+ [MOUNTPROC_UMNT] = {
+ .p_proc = MOUNTPROC_UMNT,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_statidx = MOUNTPROC_UMNT,
+ .p_name = "UMOUNT",
+ },
};
static struct rpc_procinfo mnt3_procedures[] = {
@@ -419,6 +491,13 @@ static struct rpc_procinfo mnt3_procedures[] = {
.p_statidx = MOUNTPROC3_MNT,
.p_name = "MOUNT",
},
+ [MOUNTPROC3_UMNT] = {
+ .p_proc = MOUNTPROC3_UMNT,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_statidx = MOUNTPROC3_UMNT,
+ .p_name = "UMOUNT",
+ },
};
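The two one-character changes in decode_status() and decode_fhs_status() above fix a classic off-by-one: ARRAY_SIZE() is an element count, so a loop bound of i <= ARRAY_SIZE(tbl) reads one entry past the end of the table. The fix in miniature:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const struct { int status; int err; } errtbl[] = {
	{ 0, 0 }, { 1, -1 }, { 2, -13 },
};

static int map_status(int status)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(errtbl); i++)	/* '<', not '<=' */
		if (errtbl[i].status == status)
			return errtbl[i].err;
	return -5;	/* unknown status; the original returns -EIO */
}

int main(void)
{
	printf("%d\n", map_status(2));	/* -13 */
	printf("%d\n", map_status(9));	/* -5 */
	return 0;
}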
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 61bc3a3..6ea07a3 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -220,6 +220,7 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
extern int nfs4_proc_create_session(struct nfs_client *, int reset);
extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_init_session(struct nfs_server *server);
#else /* CONFIG_NFS_v4_1 */
static inline int nfs4_setup_sequence(struct nfs_client *clp,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -227,6 +228,11 @@ static inline int nfs4_setup_sequence(struct nfs_client *clp,
{
return 0;
}
+
+static inline int nfs4_init_session(struct nfs_server *server)
+{
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 2a2a0a7..2636c26 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -17,6 +17,7 @@
#include <linux/inet.h>
#include "internal.h"
#include "nfs4_fs.h"
+#include "dns_resolve.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -95,6 +96,20 @@ static int nfs4_validate_fspath(const struct vfsmount *mnt_parent,
return 0;
}
+static size_t nfs_parse_server_name(char *string, size_t len,
+ struct sockaddr *sa, size_t salen)
+{
+ ssize_t ret;
+
+ ret = rpc_pton(string, len, sa, salen);
+ if (ret == 0) {
+ ret = nfs_dns_resolve_name(string, len, sa, salen);
+ if (ret < 0)
+ ret = 0;
+ }
+ return ret;
+}
+
static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
char *page, char *page2,
const struct nfs4_fs_location *location)
@@ -121,11 +136,12 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len))
continue;
- nfs_parse_ip_address(buf->data, buf->len,
- mountdata->addr, &mountdata->addrlen);
- if (mountdata->addr->sa_family == AF_UNSPEC)
+ mountdata->addrlen = nfs_parse_server_name(buf->data,
+ buf->len,
+ mountdata->addr, mountdata->addrlen);
+ if (mountdata->addrlen == 0)
continue;
- nfs_set_port(mountdata->addr, NFS_PORT);
+ rpc_set_port(mountdata->addr, NFS_PORT);
memcpy(page2, buf->data, buf->len);
page2[buf->len] = '\0';
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 92ce435..be6544a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -45,7 +45,6 @@
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
-#include <linux/smp_lock.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
@@ -62,6 +61,8 @@
#define NFS4_POLL_RETRY_MIN (HZ/10)
#define NFS4_POLL_RETRY_MAX (15*HZ)
+#define NFS4_MAX_LOOP_ON_RECOVER (10)
+
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
@@ -427,17 +428,19 @@ out:
static int nfs4_recover_session(struct nfs4_session *session)
{
struct nfs_client *clp = session->clp;
+ unsigned int loop;
int ret;
- for (;;) {
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
- return ret;
+ break;
if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
break;
nfs4_schedule_state_manager(clp);
+ ret = -EIO;
}
- return 0;
+ return ret;
}
static int nfs41_setup_sequence(struct nfs4_session *session,
@@ -1445,18 +1448,20 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
+ unsigned int loop;
int ret;
- for (;;) {
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
- return ret;
+ break;
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
!test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
break;
nfs4_schedule_state_recovery(clp);
+ ret = -EIO;
}
- return 0;
+ return ret;
}
/*
@@ -1998,12 +2003,34 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+ server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
+ NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
+ NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
+ NFS_CAP_CTIME|NFS_CAP_MTIME);
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
server->caps |= NFS_CAP_ACLS;
if (res.has_links != 0)
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
+ if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
+ server->caps |= NFS_CAP_FILEID;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
+ server->caps |= NFS_CAP_MODE;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
+ server->caps |= NFS_CAP_NLINK;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
+ server->caps |= NFS_CAP_OWNER;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
+ server->caps |= NFS_CAP_OWNER_GROUP;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
+ server->caps |= NFS_CAP_ATIME;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
+ server->caps |= NFS_CAP_CTIME;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
+ server->caps |= NFS_CAP_MTIME;
+
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
@@ -2041,15 +2068,9 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_argp = &args,
.rpc_resp = &res,
};
- int status;
nfs_fattr_init(info->fattr);
- status = nfs4_recover_expired_lease(server);
- if (!status)
- status = nfs4_check_client_ready(server->nfs_client);
- if (!status)
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
- return status;
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -4100,15 +4121,23 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
if (request->fl_start < 0 || request->fl_end < 0)
return -EINVAL;
- if (IS_GETLK(cmd))
- return nfs4_proc_getlk(state, F_GETLK, request);
+ if (IS_GETLK(cmd)) {
+ if (state != NULL)
+ return nfs4_proc_getlk(state, F_GETLK, request);
+ return 0;
+ }
if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
return -EINVAL;
- if (request->fl_type == F_UNLCK)
- return nfs4_proc_unlck(state, cmd, request);
+ if (request->fl_type == F_UNLCK) {
+ if (state != NULL)
+ return nfs4_proc_unlck(state, cmd, request);
+ return 0;
+ }
+ if (state == NULL)
+ return -ENOLCK;
do {
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
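The nfs4_proc_lock() hunk makes the lock path tolerate a NULL open state: F_GETLK and F_UNLCK with no state are no-ops that return 0, since there is nothing to test or release, while an actual SETLK/SETLKW without state now fails cleanly with -ENOLCK. A toy model of that dispatch (the state pointer is a stand-in for the NFSv4 open state):

#include <errno.h>
#include <stdio.h>

enum lk_cmd { GETLK, SETLK, UNLCK };

static int proc_lock(void *state, enum lk_cmd cmd)
{
	if (cmd == GETLK || cmd == UNLCK)
		return state ? 1 /* do the RPC */ : 0; /* nothing to do */
	if (state == NULL)
		return -ENOLCK;	/* can't set a lock without open state */
	return 1;		/* do the RPC */
}

int main(void)
{
	printf("GETLK, no state: %d\n", proc_lock(NULL, GETLK)); /* 0 */
	printf("SETLK, no state: %d\n", proc_lock(NULL, SETLK)); /* -ENOLCK */
	return 0;
}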
@@ -4794,6 +4823,22 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
return status;
}
+int nfs4_init_session(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ int ret;
+
+ if (!nfs4_has_session(clp))
+ return 0;
+
+ clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
+ clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
+ ret = nfs4_recover_expired_lease(server);
+ if (!ret)
+ ret = nfs4_check_client_ready(clp);
+ return ret;
+}
+
/*
* Renew the cl_session lease.
*/
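nfs4_init_session() above is new glue for NFSv4.1 mounts: before recovering the lease it seeds the fore channel's maximum request and response sizes from the mount's wsize and rsize, so the later CREATE_SESSION negotiates buffers the transport will actually use; on a sessionless (v4.0) client it is a no-op. A compact sketch of that shape, with simplified types standing in for struct nfs_client and the session attributes:

#include <stdbool.h>
#include <stdio.h>

struct session { unsigned max_rqst, max_resp; };
struct client { bool has_session; struct session s; };

static int init_session(struct client *clp, unsigned wsize, unsigned rsize)
{
	if (!clp->has_session)
		return 0;		/* NFSv4.0: nothing to negotiate */
	clp->s.max_rqst = wsize;	/* largest WRITE request we send */
	clp->s.max_resp = rsize;	/* largest READ reply we expect */
	return 0;			/* the kernel recovers the lease here */
}

int main(void)
{
	struct client c = { .has_session = true };

	init_session(&c, 1048576, 1048576);
	printf("rqst=%u resp=%u\n", c.s.max_rqst, c.s.max_resp);
	return 0;
}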
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b73c5a7..65ca8c1 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -553,6 +553,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
INIT_LIST_HEAD(&lsp->ls_sequence.list);
lsp->ls_seqid.sequence = &lsp->ls_sequence;
atomic_set(&lsp->ls_count, 1);
+ lsp->ls_state = state;
lsp->ls_owner = fl_owner;
spin_lock(&clp->cl_lock);
nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
@@ -587,7 +588,6 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
if (lsp != NULL)
break;
if (new != NULL) {
- new->ls_state = state;
list_add(&new->ls_locks, &state->lock_states);
set_bit(LK_STATE_IN_USE, &state->flags);
lsp = new;
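The two nfs4state.c hunks above move the lsp->ls_state assignment out of the lookup path in nfs4_get_lock_state() and into nfs4_alloc_lock_state(), so a lock state is fully initialized before it can ever be published on state->lock_states; the lookup now only links it in. A tiny sketch of that init-before-publish rule (the types are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct state;
struct lock_state {
	struct state *owner;	/* back-pointer, set before publication */
};

/* Fully initialize before anyone else can see the object. */
static struct lock_state *alloc_lock_state(struct state *st)
{
	struct lock_state *lsp = calloc(1, sizeof(*lsp));

	if (lsp)
		lsp->owner = st;	/* previously set later, at insert time */
	return lsp;
}

int main(void)
{
	struct lock_state *lsp = alloc_lock_state((struct state *)0x1);

	printf("owner set at alloc: %p\n", (void *)lsp->owner);
	free(lsp);
	return 0;
}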
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 617273e..cfc30d3 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -702,29 +702,12 @@ struct compound_hdr {
u32 minorversion;
};
-/*
- * START OF "GENERIC" ENCODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
-#define WRITE32(n) *p++ = htonl(n)
-#define WRITE64(n) do { \
- *p++ = htonl((uint32_t)((n) >> 32)); \
- *p++ = htonl((uint32_t)(n)); \
-} while (0)
-#define WRITEMEM(ptr,nbytes) do { \
- p = xdr_encode_opaque_fixed(p, ptr, nbytes); \
-} while (0)
-
-#define RESERVE_SPACE(nbytes) do { \
- p = xdr_reserve_space(xdr, nbytes); \
- BUG_ON(!p); \
-} while (0)
+static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
+{
+ __be32 *p = xdr_reserve_space(xdr, nbytes);
+ BUG_ON(!p);
+ return p;
+}
static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
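This hunk sets the theme for the rest of the nfs4xdr.c changes: the RESERVE_SPACE/WRITE32/WRITE64/WRITEMEM macros, which hid a local cursor and buried a BUG_ON, give way to an explicit reserve_space() helper that returns the write position, with stores spelled out as cpu_to_be32(), xdr_encode_hyper() and xdr_encode_opaque_fixed(). A userspace model of the cursor style against a flat buffer; the kernel's xdr_stream does real bounds management where this sketch only asserts:

#include <arpa/inet.h>	/* htonl */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t buf[64];
static uint32_t *cur = buf;

/* Model of reserve_space(): return the write cursor for nbytes. */
static uint32_t *reserve_space(size_t nbytes)
{
	uint32_t *p = cur;

	assert(nbytes % 4 == 0 && cur + nbytes / 4 <= buf + 64);
	cur += nbytes / 4;
	return p;
}

/* Model of xdr_encode_hyper(): 64 bits big-endian, high word first. */
static uint32_t *encode_hyper(uint32_t *p, uint64_t v)
{
	*p++ = htonl((uint32_t)(v >> 32));
	*p++ = htonl((uint32_t)v);
	return p;
}

int main(void)
{
	/* Same shape as: p = reserve_space(xdr, 16); opcode, offset, count. */
	uint32_t *p = reserve_space(16);

	*p++ = htonl(25);		/* opcode (25 = OP_READ) */
	p = encode_hyper(p, 4096);	/* 64-bit offset */
	*p = htonl(512);		/* count */
	printf("encoded %zu words\n", (size_t)(cur - buf));
	return 0;
}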
@@ -749,12 +732,11 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
- RESERVE_SPACE(12+(XDR_QUADLEN(hdr->taglen)<<2));
- WRITE32(hdr->taglen);
- WRITEMEM(hdr->tag, hdr->taglen);
- WRITE32(hdr->minorversion);
+ p = reserve_space(xdr, 4 + hdr->taglen + 8);
+ p = xdr_encode_opaque(p, hdr->tag, hdr->taglen);
+ *p++ = cpu_to_be32(hdr->minorversion);
hdr->nops_p = p;
- WRITE32(hdr->nops);
+ *p = cpu_to_be32(hdr->nops);
}
static void encode_nops(struct compound_hdr *hdr)
@@ -829,55 +811,53 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
len += 16;
else if (iap->ia_valid & ATTR_MTIME)
len += 4;
- RESERVE_SPACE(len);
+ p = reserve_space(xdr, len);
/*
* We write the bitmap length now, but leave the bitmap and the attribute
* buffer length to be backfilled at the end of this routine.
*/
- WRITE32(2);
+ *p++ = cpu_to_be32(2);
q = p;
p += 3;
if (iap->ia_valid & ATTR_SIZE) {
bmval0 |= FATTR4_WORD0_SIZE;
- WRITE64(iap->ia_size);
+ p = xdr_encode_hyper(p, iap->ia_size);
}
if (iap->ia_valid & ATTR_MODE) {
bmval1 |= FATTR4_WORD1_MODE;
- WRITE32(iap->ia_mode & S_IALLUGO);
+ *p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
}
if (iap->ia_valid & ATTR_UID) {
bmval1 |= FATTR4_WORD1_OWNER;
- WRITE32(owner_namelen);
- WRITEMEM(owner_name, owner_namelen);
+ p = xdr_encode_opaque(p, owner_name, owner_namelen);
}
if (iap->ia_valid & ATTR_GID) {
bmval1 |= FATTR4_WORD1_OWNER_GROUP;
- WRITE32(owner_grouplen);
- WRITEMEM(owner_group, owner_grouplen);
+ p = xdr_encode_opaque(p, owner_group, owner_grouplen);
}
if (iap->ia_valid & ATTR_ATIME_SET) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
- WRITE32(NFS4_SET_TO_CLIENT_TIME);
- WRITE32(0);
- WRITE32(iap->ia_mtime.tv_sec);
- WRITE32(iap->ia_mtime.tv_nsec);
+ *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+ *p++ = cpu_to_be32(0);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
}
else if (iap->ia_valid & ATTR_ATIME) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
- WRITE32(NFS4_SET_TO_SERVER_TIME);
+ *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
if (iap->ia_valid & ATTR_MTIME_SET) {
bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
- WRITE32(NFS4_SET_TO_CLIENT_TIME);
- WRITE32(0);
- WRITE32(iap->ia_mtime.tv_sec);
- WRITE32(iap->ia_mtime.tv_nsec);
+ *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+ *p++ = cpu_to_be32(0);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
}
else if (iap->ia_valid & ATTR_MTIME) {
bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
- WRITE32(NFS4_SET_TO_SERVER_TIME);
+ *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
/*
@@ -891,7 +871,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
len = (char *)p - (char *)q - 12;
*q++ = htonl(bmval0);
*q++ = htonl(bmval1);
- *q++ = htonl(len);
+ *q = htonl(len);
/* out: */
}
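encode_attrs() cannot know the bitmap words or the attribute-buffer length until the attributes have been written, so it reserves the worst case up front, remembers q, skips three words (two bitmap words plus the length) and backfills them at the end; the hunk above merely drops a stray post-increment on the final store. A self-contained demonstration of the backfill trick:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buf[16];
	uint32_t *p = buf, *q;
	uint32_t bm0 = 0, bm1 = 0;

	*p++ = htonl(2);	/* bitmap word count is known up front */
	q = p;			/* remember the backfill point */
	p += 3;			/* skip bm0, bm1 and the attr length */

	*p++ = htonl(0644);	/* pretend attribute: a mode value */
	bm1 |= 1u << 6;		/* stand-in for the mode bit */

	/* Backfill now that the attribute size is known. */
	*q++ = htonl(bm0);
	*q++ = htonl(bm1);
	*q = htonl((uint32_t)((char *)p - (char *)q - 4));

	printf("attr len word = %u\n", ntohl(buf[3]));	/* 4 */
	return 0;
}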
@@ -900,9 +880,9 @@ static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hd
{
__be32 *p;
- RESERVE_SPACE(8);
- WRITE32(OP_ACCESS);
- WRITE32(access);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_ACCESS);
+ *p = cpu_to_be32(access);
hdr->nops++;
hdr->replen += decode_access_maxsz;
}
@@ -911,10 +891,10 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
{
__be32 *p;
- RESERVE_SPACE(8+NFS4_STATEID_SIZE);
- WRITE32(OP_CLOSE);
- WRITE32(arg->seqid->sequence->counter);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 8+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_CLOSE);
+ *p++ = cpu_to_be32(arg->seqid->sequence->counter);
+ xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_close_maxsz;
}
@@ -923,10 +903,10 @@ static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *ar
{
__be32 *p;
- RESERVE_SPACE(16);
- WRITE32(OP_COMMIT);
- WRITE64(args->offset);
- WRITE32(args->count);
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(OP_COMMIT);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
hdr->nops++;
hdr->replen += decode_commit_maxsz;
}
@@ -935,30 +915,28 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *
{
__be32 *p;
- RESERVE_SPACE(8);
- WRITE32(OP_CREATE);
- WRITE32(create->ftype);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_CREATE);
+ *p = cpu_to_be32(create->ftype);
switch (create->ftype) {
case NF4LNK:
- RESERVE_SPACE(4);
- WRITE32(create->u.symlink.len);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(create->u.symlink.len);
xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len);
break;
case NF4BLK: case NF4CHR:
- RESERVE_SPACE(8);
- WRITE32(create->u.device.specdata1);
- WRITE32(create->u.device.specdata2);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(create->u.device.specdata1);
+ *p = cpu_to_be32(create->u.device.specdata2);
break;
default:
break;
}
- RESERVE_SPACE(4 + create->name->len);
- WRITE32(create->name->len);
- WRITEMEM(create->name->name, create->name->len);
+ encode_string(xdr, create->name->len, create->name->name);
hdr->nops++;
hdr->replen += decode_create_maxsz;
@@ -969,10 +947,10 @@ static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct c
{
__be32 *p;
- RESERVE_SPACE(12);
- WRITE32(OP_GETATTR);
- WRITE32(1);
- WRITE32(bitmap);
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(OP_GETATTR);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(bitmap);
hdr->nops++;
hdr->replen += decode_getattr_maxsz;
}
@@ -981,11 +959,11 @@ static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm
{
__be32 *p;
- RESERVE_SPACE(16);
- WRITE32(OP_GETATTR);
- WRITE32(2);
- WRITE32(bm0);
- WRITE32(bm1);
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(OP_GETATTR);
+ *p++ = cpu_to_be32(2);
+ *p++ = cpu_to_be32(bm0);
+ *p = cpu_to_be32(bm1);
hdr->nops++;
hdr->replen += decode_getattr_maxsz;
}
@@ -1012,8 +990,8 @@ static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_GETFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_GETFH);
hdr->nops++;
hdr->replen += decode_getfh_maxsz;
}
@@ -1022,10 +1000,9 @@ static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct
{
__be32 *p;
- RESERVE_SPACE(8 + name->len);
- WRITE32(OP_LINK);
- WRITE32(name->len);
- WRITEMEM(name->name, name->len);
+ p = reserve_space(xdr, 8 + name->len);
+ *p++ = cpu_to_be32(OP_LINK);
+ xdr_encode_opaque(p, name->name, name->len);
hdr->nops++;
hdr->replen += decode_link_maxsz;
}
@@ -1052,27 +1029,27 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
{
__be32 *p;
- RESERVE_SPACE(32);
- WRITE32(OP_LOCK);
- WRITE32(nfs4_lock_type(args->fl, args->block));
- WRITE32(args->reclaim);
- WRITE64(args->fl->fl_start);
- WRITE64(nfs4_lock_length(args->fl));
- WRITE32(args->new_lock_owner);
+ p = reserve_space(xdr, 32);
+ *p++ = cpu_to_be32(OP_LOCK);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block));
+ *p++ = cpu_to_be32(args->reclaim);
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
+ *p = cpu_to_be32(args->new_lock_owner);
if (args->new_lock_owner){
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+32);
- WRITE32(args->open_seqid->sequence->counter);
- WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE);
- WRITE32(args->lock_seqid->sequence->counter);
- WRITE64(args->lock_owner.clientid);
- WRITE32(16);
- WRITEMEM("lock id:", 8);
- WRITE64(args->lock_owner.id);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+32);
+ *p++ = cpu_to_be32(args->open_seqid->sequence->counter);
+ p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(args->lock_seqid->sequence->counter);
+ p = xdr_encode_hyper(p, args->lock_owner.clientid);
+ *p++ = cpu_to_be32(16);
+ p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+ xdr_encode_hyper(p, args->lock_owner.id);
}
else {
- RESERVE_SPACE(NFS4_STATEID_SIZE+4);
- WRITEMEM(args->lock_stateid->data, NFS4_STATEID_SIZE);
- WRITE32(args->lock_seqid->sequence->counter);
+ p = reserve_space(xdr, NFS4_STATEID_SIZE+4);
+ p = xdr_encode_opaque_fixed(p, args->lock_stateid->data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(args->lock_seqid->sequence->counter);
}
hdr->nops++;
hdr->replen += decode_lock_maxsz;
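An NFSv4 lock_owner is a clientid plus an opaque owner string; this client builds the opaque as the literal eight bytes "lock id:" followed by a 64-bit id, which is where the fixed length of 16 in both the old WRITE32(16) and the new cpu_to_be32(16) comes from. A sketch of assembling that 16-byte owner in userspace:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char owner[16];
	uint64_t id = 42;
	uint32_t hi = htonl((uint32_t)(id >> 32));
	uint32_t lo = htonl((uint32_t)id);

	memcpy(owner, "lock id:", 8);	/* fixed eight-byte tag */
	memcpy(owner + 8, &hi, 4);	/* 64-bit id, big-endian */
	memcpy(owner + 12, &lo, 4);

	/* On the wire: length word 16, then these 16 bytes. */
	printf("tag=%.8s id=%u\n", (const char *)owner, ntohl(lo));
	return 0;
}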
@@ -1082,15 +1059,15 @@ static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *ar
{
__be32 *p;
- RESERVE_SPACE(52);
- WRITE32(OP_LOCKT);
- WRITE32(nfs4_lock_type(args->fl, 0));
- WRITE64(args->fl->fl_start);
- WRITE64(nfs4_lock_length(args->fl));
- WRITE64(args->lock_owner.clientid);
- WRITE32(16);
- WRITEMEM("lock id:", 8);
- WRITE64(args->lock_owner.id);
+ p = reserve_space(xdr, 52);
+ *p++ = cpu_to_be32(OP_LOCKT);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
+ p = xdr_encode_hyper(p, args->lock_owner.clientid);
+ *p++ = cpu_to_be32(16);
+ p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+ xdr_encode_hyper(p, args->lock_owner.id);
hdr->nops++;
hdr->replen += decode_lockt_maxsz;
}
@@ -1099,13 +1076,13 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
{
__be32 *p;
- RESERVE_SPACE(12+NFS4_STATEID_SIZE+16);
- WRITE32(OP_LOCKU);
- WRITE32(nfs4_lock_type(args->fl, 0));
- WRITE32(args->seqid->sequence->counter);
- WRITEMEM(args->stateid->data, NFS4_STATEID_SIZE);
- WRITE64(args->fl->fl_start);
- WRITE64(nfs4_lock_length(args->fl));
+ p = reserve_space(xdr, 12+NFS4_STATEID_SIZE+16);
+ *p++ = cpu_to_be32(OP_LOCKU);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
+ *p++ = cpu_to_be32(args->seqid->sequence->counter);
+ p = xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ xdr_encode_hyper(p, nfs4_lock_length(args->fl));
hdr->nops++;
hdr->replen += decode_locku_maxsz;
}
@@ -1115,10 +1092,9 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc
int len = name->len;
__be32 *p;
- RESERVE_SPACE(8 + len);
- WRITE32(OP_LOOKUP);
- WRITE32(len);
- WRITEMEM(name->name, len);
+ p = reserve_space(xdr, 8 + len);
+ *p++ = cpu_to_be32(OP_LOOKUP);
+ xdr_encode_opaque(p, name->name, len);
hdr->nops++;
hdr->replen += decode_lookup_maxsz;
}
@@ -1127,21 +1103,21 @@ static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
{
__be32 *p;
- RESERVE_SPACE(8);
+ p = reserve_space(xdr, 8);
switch (fmode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
- WRITE32(NFS4_SHARE_ACCESS_READ);
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ);
break;
case FMODE_WRITE:
- WRITE32(NFS4_SHARE_ACCESS_WRITE);
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE);
break;
case FMODE_READ|FMODE_WRITE:
- WRITE32(NFS4_SHARE_ACCESS_BOTH);
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH);
break;
default:
- WRITE32(0);
+ *p++ = cpu_to_be32(0);
}
- WRITE32(0); /* for linux, share_deny = 0 always */
+ *p = cpu_to_be32(0); /* for linux, share_deny = 0 always */
}
static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
@@ -1151,29 +1127,29 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
* opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
* owner 4 = 32
*/
- RESERVE_SPACE(8);
- WRITE32(OP_OPEN);
- WRITE32(arg->seqid->sequence->counter);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_OPEN);
+ *p = cpu_to_be32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
- RESERVE_SPACE(28);
- WRITE64(arg->clientid);
- WRITE32(16);
- WRITEMEM("open id:", 8);
- WRITE64(arg->id);
+ p = reserve_space(xdr, 28);
+ p = xdr_encode_hyper(p, arg->clientid);
+ *p++ = cpu_to_be32(16);
+ p = xdr_encode_opaque_fixed(p, "open id:", 8);
+ xdr_encode_hyper(p, arg->id);
}
static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
- RESERVE_SPACE(4);
+ p = reserve_space(xdr, 4);
switch(arg->open_flags & O_EXCL) {
case 0:
- WRITE32(NFS4_CREATE_UNCHECKED);
+ *p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
encode_attrs(xdr, arg->u.attrs, arg->server);
break;
default:
- WRITE32(NFS4_CREATE_EXCLUSIVE);
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
encode_nfs4_verifier(xdr, &arg->u.verifier);
}
}
@@ -1182,14 +1158,14 @@ static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *a
{
__be32 *p;
- RESERVE_SPACE(4);
+ p = reserve_space(xdr, 4);
switch (arg->open_flags & O_CREAT) {
case 0:
- WRITE32(NFS4_OPEN_NOCREATE);
+ *p = cpu_to_be32(NFS4_OPEN_NOCREATE);
break;
default:
BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
- WRITE32(NFS4_OPEN_CREATE);
+ *p = cpu_to_be32(NFS4_OPEN_CREATE);
encode_createmode(xdr, arg);
}
}
@@ -1198,16 +1174,16 @@ static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delega
{
__be32 *p;
- RESERVE_SPACE(4);
+ p = reserve_space(xdr, 4);
switch (delegation_type) {
case 0:
- WRITE32(NFS4_OPEN_DELEGATE_NONE);
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE);
break;
case FMODE_READ:
- WRITE32(NFS4_OPEN_DELEGATE_READ);
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ);
break;
case FMODE_WRITE|FMODE_READ:
- WRITE32(NFS4_OPEN_DELEGATE_WRITE);
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE);
break;
default:
BUG();
@@ -1218,8 +1194,8 @@ static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(NFS4_OPEN_CLAIM_NULL);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL);
encode_string(xdr, name->len, name->name);
}
@@ -1227,8 +1203,8 @@ static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(NFS4_OPEN_CLAIM_PREVIOUS);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS);
encode_delegation_type(xdr, type);
}
@@ -1236,9 +1212,9 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
- WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
- WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
+ xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
encode_string(xdr, name->len, name->name);
}
@@ -1267,10 +1243,10 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
- WRITE32(OP_OPEN_CONFIRM);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
- WRITE32(arg->seqid->sequence->counter);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
+ *p++ = cpu_to_be32(OP_OPEN_CONFIRM);
+ p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(arg->seqid->sequence->counter);
hdr->nops++;
hdr->replen += decode_open_confirm_maxsz;
}
@@ -1279,10 +1255,10 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
- WRITE32(OP_OPEN_DOWNGRADE);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
- WRITE32(arg->seqid->sequence->counter);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
+ *p++ = cpu_to_be32(OP_OPEN_DOWNGRADE);
+ p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
hdr->nops++;
hdr->replen += decode_open_downgrade_maxsz;
@@ -1294,10 +1270,9 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hd
int len = fh->size;
__be32 *p;
- RESERVE_SPACE(8 + len);
- WRITE32(OP_PUTFH);
- WRITE32(len);
- WRITEMEM(fh->data, len);
+ p = reserve_space(xdr, 8 + len);
+ *p++ = cpu_to_be32(OP_PUTFH);
+ xdr_encode_opaque(p, fh->data, len);
hdr->nops++;
hdr->replen += decode_putfh_maxsz;
}
@@ -1306,8 +1281,8 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_PUTROOTFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_PUTROOTFH);
hdr->nops++;
hdr->replen += decode_putrootfh_maxsz;
}
@@ -1317,26 +1292,26 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context
nfs4_stateid stateid;
__be32 *p;
- RESERVE_SPACE(NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, NFS4_STATEID_SIZE);
if (ctx->state != NULL) {
nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
- WRITEMEM(stateid.data, NFS4_STATEID_SIZE);
+ xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
} else
- WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
+ xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
}
static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_READ);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_READ);
encode_stateid(xdr, args->context);
- RESERVE_SPACE(12);
- WRITE64(args->offset);
- WRITE32(args->count);
+ p = reserve_space(xdr, 12);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
hdr->nops++;
hdr->replen += decode_read_maxsz;
}
@@ -1349,20 +1324,20 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
};
__be32 *p;
- RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20);
- WRITE32(OP_READDIR);
- WRITE64(readdir->cookie);
- WRITEMEM(readdir->verifier.data, NFS4_VERIFIER_SIZE);
- WRITE32(readdir->count >> 1); /* We're not doing readdirplus */
- WRITE32(readdir->count);
- WRITE32(2);
+ p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20);
+ *p++ = cpu_to_be32(OP_READDIR);
+ p = xdr_encode_hyper(p, readdir->cookie);
+ p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE);
+ *p++ = cpu_to_be32(readdir->count >> 1); /* We're not doing readdirplus */
+ *p++ = cpu_to_be32(readdir->count);
+ *p++ = cpu_to_be32(2);
/* Switch to mounted_on_fileid if the server supports it */
if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
attrs[0] &= ~FATTR4_WORD0_FILEID;
else
attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
- WRITE32(attrs[0] & readdir->bitmask[0]);
- WRITE32(attrs[1] & readdir->bitmask[1]);
+ *p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]);
+ *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]);
hdr->nops++;
hdr->replen += decode_readdir_maxsz;
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
@@ -1378,8 +1353,8 @@ static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_READLINK);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_READLINK);
hdr->nops++;
hdr->replen += decode_readlink_maxsz;
}
@@ -1388,10 +1363,9 @@ static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struc
{
__be32 *p;
- RESERVE_SPACE(8 + name->len);
- WRITE32(OP_REMOVE);
- WRITE32(name->len);
- WRITEMEM(name->name, name->len);
+ p = reserve_space(xdr, 8 + name->len);
+ *p++ = cpu_to_be32(OP_REMOVE);
+ xdr_encode_opaque(p, name->name, name->len);
hdr->nops++;
hdr->replen += decode_remove_maxsz;
}
@@ -1400,14 +1374,10 @@ static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, co
{
__be32 *p;
- RESERVE_SPACE(8 + oldname->len);
- WRITE32(OP_RENAME);
- WRITE32(oldname->len);
- WRITEMEM(oldname->name, oldname->len);
-
- RESERVE_SPACE(4 + newname->len);
- WRITE32(newname->len);
- WRITEMEM(newname->name, newname->len);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_RENAME);
+ encode_string(xdr, oldname->len, oldname->name);
+ encode_string(xdr, newname->len, newname->name);
hdr->nops++;
hdr->replen += decode_rename_maxsz;
}
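encode_rename() stops open-coding two reserve/write pairs and reuses encode_string(), which emits the standard XDR opaque form: a four-byte big-endian length, the bytes themselves, then zero padding to the next four-byte boundary. A small model of that wire layout:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of xdr_encode_opaque(): length word + bytes + pad to 4. */
static size_t encode_opaque(unsigned char *out, const char *s, uint32_t len)
{
	uint32_t be = htonl(len);
	size_t padded = (len + 3) & ~3u;

	memcpy(out, &be, 4);
	memcpy(out + 4, s, len);
	memset(out + 4 + len, 0, padded - len);
	return 4 + padded;
}

int main(void)
{
	unsigned char buf[32];
	size_t n = encode_opaque(buf, "old", 3);	/* 3 bytes -> 1 pad byte */

	printf("wire size for \"old\": %zu bytes\n", n);	/* 8 */
	return 0;
}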
@@ -1416,9 +1386,9 @@ static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client
{
__be32 *p;
- RESERVE_SPACE(12);
- WRITE32(OP_RENEW);
- WRITE64(client_stateid->cl_clientid);
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(OP_RENEW);
+ xdr_encode_hyper(p, client_stateid->cl_clientid);
hdr->nops++;
hdr->replen += decode_renew_maxsz;
}
@@ -1428,8 +1398,8 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_RESTOREFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_RESTOREFH);
hdr->nops++;
hdr->replen += decode_restorefh_maxsz;
}
@@ -1439,16 +1409,16 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
- WRITE32(OP_SETATTR);
- WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
- RESERVE_SPACE(2*4);
- WRITE32(1);
- WRITE32(FATTR4_WORD0_ACL);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_SETATTR);
+ xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 2*4);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(FATTR4_WORD0_ACL);
if (arg->acl_len % 4)
return -EINVAL;
- RESERVE_SPACE(4);
- WRITE32(arg->acl_len);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
hdr->nops++;
hdr->replen += decode_setacl_maxsz;
@@ -1460,8 +1430,8 @@ encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_SAVEFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_SAVEFH);
hdr->nops++;
hdr->replen += decode_savefh_maxsz;
}
@@ -1470,9 +1440,9 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
- WRITE32(OP_SETATTR);
- WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_SETATTR);
+ xdr_encode_opaque_fixed(p, arg->stateid.data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_setattr_maxsz;
encode_attrs(xdr, arg->iap, server);
@@ -1482,17 +1452,17 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
{
__be32 *p;
- RESERVE_SPACE(4 + NFS4_VERIFIER_SIZE);
- WRITE32(OP_SETCLIENTID);
- WRITEMEM(setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
+ p = reserve_space(xdr, 4 + NFS4_VERIFIER_SIZE);
+ *p++ = cpu_to_be32(OP_SETCLIENTID);
+ xdr_encode_opaque_fixed(p, setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
- RESERVE_SPACE(4);
- WRITE32(setclientid->sc_prog);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(setclientid->sc_prog);
encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
- RESERVE_SPACE(4);
- WRITE32(setclientid->sc_cb_ident);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(setclientid->sc_cb_ident);
hdr->nops++;
hdr->replen += decode_setclientid_maxsz;
}
@@ -1501,10 +1471,10 @@ static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_
{
__be32 *p;
- RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE);
- WRITE32(OP_SETCLIENTID_CONFIRM);
- WRITE64(client_state->cl_clientid);
- WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE);
+ *p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM);
+ p = xdr_encode_hyper(p, client_state->cl_clientid);
+ xdr_encode_opaque_fixed(p, client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
hdr->replen += decode_setclientid_confirm_maxsz;
}
@@ -1513,15 +1483,15 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_WRITE);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_WRITE);
encode_stateid(xdr, args->context);
- RESERVE_SPACE(16);
- WRITE64(args->offset);
- WRITE32(args->stable);
- WRITE32(args->count);
+ p = reserve_space(xdr, 16);
+ p = xdr_encode_hyper(p, args->offset);
+ *p++ = cpu_to_be32(args->stable);
+ *p = cpu_to_be32(args->count);
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
hdr->nops++;
@@ -1532,10 +1502,10 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state
{
__be32 *p;
-	RESERVE_SPACE(4+NFS4_STATEID_SIZE);
-	WRITE32(OP_DELEGRETURN);
-	WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
+	p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
+	*p++ = cpu_to_be32(OP_DELEGRETURN);
+	xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_delegreturn_maxsz;
}
@@ -1548,16 +1518,16 @@ static void encode_exchange_id(struct xdr_stream *xdr,
{
__be32 *p;
- RESERVE_SPACE(4 + sizeof(args->verifier->data));
- WRITE32(OP_EXCHANGE_ID);
- WRITEMEM(args->verifier->data, sizeof(args->verifier->data));
+ p = reserve_space(xdr, 4 + sizeof(args->verifier->data));
+ *p++ = cpu_to_be32(OP_EXCHANGE_ID);
+ xdr_encode_opaque_fixed(p, args->verifier->data, sizeof(args->verifier->data));
encode_string(xdr, args->id_len, args->id);
- RESERVE_SPACE(12);
- WRITE32(args->flags);
- WRITE32(0); /* zero length state_protect4_a */
- WRITE32(0); /* zero length implementation id array */
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(args->flags);
+ *p++ = cpu_to_be32(0); /* zero length state_protect4_a */
+ *p = cpu_to_be32(0); /* zero length implementation id array */
hdr->nops++;
hdr->replen += decode_exchange_id_maxsz;
}
@@ -1571,55 +1541,43 @@ static void encode_create_session(struct xdr_stream *xdr,
uint32_t len;
struct nfs_client *clp = args->client;
- RESERVE_SPACE(4);
- WRITE32(OP_CREATE_SESSION);
-
- RESERVE_SPACE(8);
- WRITE64(clp->cl_ex_clid);
+ len = scnprintf(machine_name, sizeof(machine_name), "%s",
+ clp->cl_ipaddr);
- RESERVE_SPACE(8);
- WRITE32(clp->cl_seqid); /*Sequence id */
- WRITE32(args->flags); /*flags */
+ p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
+ *p++ = cpu_to_be32(OP_CREATE_SESSION);
+ p = xdr_encode_hyper(p, clp->cl_ex_clid);
+ *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
+ *p++ = cpu_to_be32(args->flags); /*flags */
- RESERVE_SPACE(2*28); /* 2 channel_attrs */
/* Fore Channel */
- WRITE32(args->fc_attrs.headerpadsz); /* header padding size */
- WRITE32(args->fc_attrs.max_rqst_sz); /* max req size */
- WRITE32(args->fc_attrs.max_resp_sz); /* max resp size */
- WRITE32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
- WRITE32(args->fc_attrs.max_ops); /* max operations */
- WRITE32(args->fc_attrs.max_reqs); /* max requests */
- WRITE32(0); /* rdmachannel_attrs */
+ *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */
+ *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */
+ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
/* Back Channel */
- WRITE32(args->fc_attrs.headerpadsz); /* header padding size */
- WRITE32(args->bc_attrs.max_rqst_sz); /* max req size */
- WRITE32(args->bc_attrs.max_resp_sz); /* max resp size */
- WRITE32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
- WRITE32(args->bc_attrs.max_ops); /* max operations */
- WRITE32(args->bc_attrs.max_reqs); /* max requests */
- WRITE32(0); /* rdmachannel_attrs */
-
- RESERVE_SPACE(4);
- WRITE32(args->cb_program); /* cb_program */
-
- RESERVE_SPACE(4); /* # of security flavors */
- WRITE32(1);
-
- RESERVE_SPACE(4);
- WRITE32(RPC_AUTH_UNIX); /* auth_sys */
+ *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ *p++ = cpu_to_be32(args->bc_attrs.max_ops); /* max operations */
+ *p++ = cpu_to_be32(args->bc_attrs.max_reqs); /* max requests */
+ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
+
+ *p++ = cpu_to_be32(args->cb_program); /* cb_program */
+ *p++ = cpu_to_be32(1);
+ *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
/* authsys_parms rfc1831 */
- RESERVE_SPACE(4);
- WRITE32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
- len = scnprintf(machine_name, sizeof(machine_name), "%s",
- clp->cl_ipaddr);
- RESERVE_SPACE(16 + len);
- WRITE32(len);
- WRITEMEM(machine_name, len);
- WRITE32(0); /* UID */
- WRITE32(0); /* GID */
- WRITE32(0); /* No more gids */
+ *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ p = xdr_encode_opaque(p, machine_name, len);
+ *p++ = cpu_to_be32(0); /* UID */
+ *p++ = cpu_to_be32(0); /* GID */
+ *p = cpu_to_be32(0); /* No more gids */
hdr->nops++;
hdr->replen += decode_create_session_maxsz;
}
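Where the old encode_create_session() interleaved many small RESERVE_SPACE calls, the new one computes a single byte budget: 20 for opcode, clientid, seqid and flags; 2*28 for the two channel-attribute blocks of seven words each; 20 for cb_program, the flavor count, AUTH_UNIX, the stamp and the machine-name length word; len for the name bytes; and 12 for uid, gid and the empty gid list. That reading of the arithmetic is mine, not spelled out in the patch; a sketch that just reproduces it (the address literal is an arbitrary example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *machine = "192.168.0.7";	/* stand-in for clp->cl_ipaddr */
	size_t len = strlen(machine);

	/* 20: op + clientid + seqid + flags
	 * 56: two channel_attrs blocks of seven words each
	 * 20: cb_program + flavor count + AUTH_UNIX + stamp + name length
	 * len: machine name bytes
	 * 12: uid + gid + empty gid list */
	size_t budget = 20 + 2 * 28 + 20 + len + 12;

	printf("CREATE_SESSION reserve: %zu bytes\n", budget);
	return 0;
}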
@@ -1629,9 +1587,9 @@ static void encode_destroy_session(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4 + NFS4_MAX_SESSIONID_LEN);
- WRITE32(OP_DESTROY_SESSION);
- WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN);
+ *p++ = cpu_to_be32(OP_DESTROY_SESSION);
+ xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
hdr->nops++;
hdr->replen += decode_destroy_session_maxsz;
}
@@ -1655,8 +1613,8 @@ static void encode_sequence(struct xdr_stream *xdr,
WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
slot = tp->slots + args->sa_slotid;
- RESERVE_SPACE(4);
- WRITE32(OP_SEQUENCE);
+ p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN + 16);
+ *p++ = cpu_to_be32(OP_SEQUENCE);
/*
* Sessionid + seqid + slotid + max slotid + cache_this
@@ -1670,12 +1628,11 @@ static void encode_sequence(struct xdr_stream *xdr,
((u32 *)session->sess_id.data)[3],
slot->seq_nr, args->sa_slotid,
tp->highest_used_slotid, args->sa_cache_this);
- RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 16);
- WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
- WRITE32(slot->seq_nr);
- WRITE32(args->sa_slotid);
- WRITE32(tp->highest_used_slotid);
- WRITE32(args->sa_cache_this);
+ p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ *p++ = cpu_to_be32(slot->seq_nr);
+ *p++ = cpu_to_be32(args->sa_slotid);
+ *p++ = cpu_to_be32(tp->highest_used_slotid);
+ *p = cpu_to_be32(args->sa_cache_this);
hdr->nops++;
hdr->replen += decode_sequence_maxsz;
#endif /* CONFIG_NFS_V4_1 */
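encode_sequence() gets the same single-reservation treatment: 4 bytes of opcode, the 16-byte session id as a fixed opaque (no length word), then four words for sequence number, slot id, highest used slot id and the cache-this flag, 36 bytes in all. A userspace sketch of that layout; the opcode constant is illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SESSIONID_LEN 16	/* NFS4_MAX_SESSIONID_LEN */

int main(void)
{
	uint32_t wire[1 + SESSIONID_LEN / 4 + 4];	/* 36 bytes total */
	uint32_t *p = wire;
	unsigned char sessionid[SESSIONID_LEN] = { 0xab };

	*p++ = htonl(53);			/* OP_SEQUENCE */
	memcpy(p, sessionid, SESSIONID_LEN);	/* fixed opaque, no length */
	p += SESSIONID_LEN / 4;
	*p++ = htonl(7);	/* sequence number */
	*p++ = htonl(0);	/* slot id */
	*p++ = htonl(0);	/* highest used slot id */
	*p = htonl(1);		/* cache this reply */

	printf("SEQUENCE op is %zu bytes\n", sizeof(wire));
	return 0;
}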
@@ -2466,68 +2423,53 @@ static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
}
#endif /* CONFIG_NFS_V4_1 */
-/*
- * START OF "GENERIC" DECODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
-#define READ32(x) (x) = ntohl(*p++)
-#define READ64(x) do { \
- (x) = (u64)ntohl(*p++) << 32; \
- (x) |= ntohl(*p++); \
-} while (0)
-#define READTIME(x) do { \
- p++; \
- (x.tv_sec) = ntohl(*p++); \
- (x.tv_nsec) = ntohl(*p++); \
-} while (0)
-#define COPYMEM(x,nbytes) do { \
- memcpy((x), p, nbytes); \
- p += XDR_QUADLEN(nbytes); \
-} while (0)
-
-#define READ_BUF(nbytes) do { \
- p = xdr_inline_decode(xdr, nbytes); \
- if (unlikely(!p)) { \
- dprintk("nfs: %s: prematurely hit end of receive" \
- " buffer\n", __func__); \
- dprintk("nfs: %s: xdr->p=%p, bytes=%u, xdr->end=%p\n", \
- __func__, xdr->p, nbytes, xdr->end); \
- return -EIO; \
- } \
-} while (0)
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("nfs: %s: prematurely hit end of receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
{
__be32 *p;
- READ_BUF(4);
- READ32(*len);
- READ_BUF(*len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, *len);
+ if (unlikely(!p))
+ goto out_overflow;
*string = (char *)p;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
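The decode half mirrors the encode cleanup: READ_BUF, a macro that returned -EIO from the middle of its caller, becomes an explicit xdr_inline_decode() call followed by if (unlikely(!p)) goto out_overflow;, and print_overflow_msg() centralizes the diagnostic, now reporting how many words remain in the receive buffer. A userspace model of the shape:

#include <arpa/inet.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct stream { uint32_t *p, *end; };

/* Model of xdr_inline_decode(): NULL when nbytes would overrun. */
static uint32_t *inline_decode(struct stream *x, size_t nbytes)
{
	uint32_t *p = x->p;

	if ((size_t)((char *)x->end - (char *)x->p) < nbytes)
		return NULL;
	x->p += nbytes / 4;
	return p;
}

static int decode_u32_pair(struct stream *x, uint32_t *a, uint32_t *b)
{
	uint32_t *p = inline_decode(x, 8);

	if (!p)
		goto out_overflow;
	*a = ntohl(*p++);
	*b = ntohl(*p);
	return 0;
out_overflow:
	fprintf(stderr, "premature end, %td words left\n", x->end - x->p);
	return -EIO;
}

int main(void)
{
	uint32_t buf[1] = { htonl(1) };		/* only 4 bytes available */
	struct stream x = { buf, buf + 1 };
	uint32_t a, b;

	printf("rc=%d\n", decode_u32_pair(&x, &a, &b));	/* -EIO */
	return 0;
}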
static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- READ_BUF(8);
- READ32(hdr->status);
- READ32(hdr->taglen);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ hdr->status = be32_to_cpup(p++);
+ hdr->taglen = be32_to_cpup(p);
- READ_BUF(hdr->taglen + 4);
+ p = xdr_inline_decode(xdr, hdr->taglen + 4);
+ if (unlikely(!p))
+ goto out_overflow;
hdr->tag = (char *)p;
p += XDR_QUADLEN(hdr->taglen);
- READ32(hdr->nops);
+ hdr->nops = be32_to_cpup(p);
if (unlikely(hdr->nops < 1))
return nfs4_stat_to_errno(hdr->status);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
@@ -2536,18 +2478,23 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
uint32_t opnum;
int32_t nfserr;
- READ_BUF(8);
- READ32(opnum);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
if (opnum != expected) {
dprintk("nfs: Server returned operation"
" %d but we issued a request for %d\n",
opnum, expected);
return -EIO;
}
- READ32(nfserr);
+ nfserr = be32_to_cpup(p);
if (nfserr != NFS_OK)
return nfs4_stat_to_errno(nfserr);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
/* Dummy routine */
@@ -2557,8 +2504,11 @@ static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp)
unsigned int strlen;
char *str;
- READ_BUF(12);
- return decode_opaque_inline(xdr, &strlen, &str);
+ p = xdr_inline_decode(xdr, 12);
+ if (likely(p))
+ return decode_opaque_inline(xdr, &strlen, &str);
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
@@ -2566,27 +2516,39 @@ static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
uint32_t bmlen;
__be32 *p;
- READ_BUF(4);
- READ32(bmlen);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
bitmap[0] = bitmap[1] = 0;
- READ_BUF((bmlen << 2));
+ p = xdr_inline_decode(xdr, (bmlen << 2));
+ if (unlikely(!p))
+ goto out_overflow;
if (bmlen > 0) {
- READ32(bitmap[0]);
+ bitmap[0] = be32_to_cpup(p++);
if (bmlen > 1)
- READ32(bitmap[1]);
+ bitmap[1] = be32_to_cpup(p);
}
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep)
{
__be32 *p;
- READ_BUF(4);
- READ32(*attrlen);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *attrlen = be32_to_cpup(p);
*savep = xdr->p;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
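Every decode_attr_*() helper in this file follows one contract: fail with -EIO if a lower-numbered attribute bit is still set (the bitmap & (BIT - 1U) test), decode only when its own bit is present, clear that bit so the next decoder's ordering check stays honest, and return an NFS_ATTR_FATTR_* flag describing what was filled in. A compact model of the contract over a plain array instead of an xdr_stream:

#include <stdint.h>
#include <stdio.h>

#define BIT_SIZE	(1u << 4)
#define GOT_SIZE	0x1

static int decode_attr_size(uint32_t *bitmap, const uint64_t *attr,
			    uint64_t *size)
{
	if (*bitmap & (BIT_SIZE - 1u))
		return -1;		/* caller skipped a lower attribute */
	if (!(*bitmap & BIT_SIZE))
		return 0;		/* not present: nothing decoded */
	*size = *attr;
	*bitmap &= ~BIT_SIZE;		/* consumed: keep ordering honest */
	return GOT_SIZE;
}

int main(void)
{
	uint32_t bm = BIT_SIZE;
	uint64_t attr = 4096, size = 0;

	printf("ret=%d size=%llu bm=%#x\n",
	       decode_attr_size(&bm, &attr, &size),
	       (unsigned long long)size, bm);
	return 0;
}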
@@ -2609,8 +2571,10 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
- READ_BUF(4);
- READ32(*type);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *type = be32_to_cpup(p);
if (*type < NF4REG || *type > NF4NAMEDATTR) {
dprintk("%s: bad type %d\n", __func__, *type);
return -EIO;
@@ -2620,6 +2584,9 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
}
dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
@@ -2631,14 +2598,19 @@ static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
- READ_BUF(8);
- READ64(*change);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, change);
bitmap[0] &= ~FATTR4_WORD0_CHANGE;
ret = NFS_ATTR_FATTR_CHANGE;
}
dprintk("%s: change attribute=%Lu\n", __func__,
(unsigned long long)*change);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
@@ -2650,13 +2622,18 @@ static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *
if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
- READ_BUF(8);
- READ64(*size);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, size);
bitmap[0] &= ~FATTR4_WORD0_SIZE;
ret = NFS_ATTR_FATTR_SIZE;
}
dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2667,12 +2644,17 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2683,12 +2665,17 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid)
@@ -2701,9 +2688,11 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
- READ_BUF(16);
- READ64(fsid->major);
- READ64(fsid->minor);
+ p = xdr_inline_decode(xdr, 16);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &fsid->major);
+ xdr_decode_hyper(p, &fsid->minor);
bitmap[0] &= ~FATTR4_WORD0_FSID;
ret = NFS_ATTR_FATTR_FSID;
}
@@ -2711,6 +2700,9 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
(unsigned long long)fsid->major,
(unsigned long long)fsid->minor);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2721,12 +2713,17 @@ static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
}
dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2737,12 +2734,17 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
}
dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -2754,13 +2756,18 @@ static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
- READ_BUF(8);
- READ64(*fileid);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, fileid);
bitmap[0] &= ~FATTR4_WORD0_FILEID;
ret = NFS_ATTR_FATTR_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -2772,13 +2779,18 @@ static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitma
if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) {
- READ_BUF(8);
- READ64(*fileid);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, fileid);
bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
ret = NFS_ATTR_FATTR_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2790,12 +2802,17 @@ static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
}
dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2807,12 +2824,17 @@ static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
}
dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2824,12 +2846,17 @@ static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
}
dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
@@ -2838,8 +2865,10 @@ static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
__be32 *p;
int status = 0;
- READ_BUF(4);
- READ32(n);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ n = be32_to_cpup(p);
if (n == 0)
goto root_path;
dprintk("path ");
@@ -2873,6 +2902,9 @@ out_eio:
dprintk(" status %d", status);
status = -EIO;
goto out;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res)
@@ -2890,8 +2922,10 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
status = decode_pathname(xdr, &res->fs_path);
if (unlikely(status != 0))
goto out;
- READ_BUF(4);
- READ32(n);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ n = be32_to_cpup(p);
if (n <= 0)
goto out_eio;
res->nlocations = 0;
@@ -2899,8 +2933,10 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
u32 m;
struct nfs4_fs_location *loc = &res->locations[res->nlocations];
- READ_BUF(4);
- READ32(m);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ m = be32_to_cpup(p);
loc->nservers = 0;
dprintk("%s: servers ", __func__);
@@ -2939,6 +2975,8 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
out:
dprintk("%s: fs_locations done, error = %d\n", __func__, status);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
out_eio:
status = -EIO;
goto out;
@@ -2953,12 +2991,17 @@ static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
}
dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink)
@@ -2970,12 +3013,17 @@ static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) {
- READ_BUF(4);
- READ32(*maxlink);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *maxlink = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
}
dprintk("%s: maxlink=%u\n", __func__, *maxlink);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname)
@@ -2987,12 +3035,17 @@ static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) {
- READ_BUF(4);
- READ32(*maxname);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *maxname = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
}
dprintk("%s: maxname=%u\n", __func__, *maxname);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3005,8 +3058,10 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) {
uint64_t maxread;
- READ_BUF(8);
- READ64(maxread);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &maxread);
if (maxread > 0x7FFFFFFF)
maxread = 0x7FFFFFFF;
*res = (uint32_t)maxread;
@@ -3014,6 +3069,9 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
}
dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3026,8 +3084,10 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) {
uint64_t maxwrite;
- READ_BUF(8);
- READ64(maxwrite);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &maxwrite);
if (maxwrite > 0x7FFFFFFF)
maxwrite = 0x7FFFFFFF;
*res = (uint32_t)maxwrite;
@@ -3035,6 +3095,9 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
}
dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode)
@@ -3047,14 +3110,19 @@ static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *m
if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
- READ_BUF(4);
- READ32(tmp);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ tmp = be32_to_cpup(p);
*mode = tmp & ~S_IFMT;
bitmap[1] &= ~FATTR4_WORD1_MODE;
ret = NFS_ATTR_FATTR_MODE;
}
dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
@@ -3066,16 +3134,22 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t
if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) {
- READ_BUF(4);
- READ32(*nlink);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *nlink = be32_to_cpup(p);
bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
ret = NFS_ATTR_FATTR_NLINK;
}
dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
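decode_attr_owner() and decode_attr_group() below grow a may_sleep argument because translating an owner string to a uid or gid can call up to the idmapper and block; callers decoding from a context that must not sleep pass 0, and the name is consumed from the stream but deliberately left unmapped. A toy version of that guard (the mapper is a stand-in for the idmap upcall):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the idmapper upcall, which may block. */
static int map_name_to_uid(const char *name, unsigned *uid)
{
	(void)name;	/* a real mapper would consult the idmap daemon */
	*uid = 1000;
	return 0;
}

static int decode_owner(const char *name, unsigned *uid, bool may_sleep)
{
	if (!may_sleep)
		return 0;	/* atomic context: skip the mapping */
	return map_name_to_uid(name, uid) == 0 ? 1 : 0;
}

int main(void)
{
	unsigned uid = 0;

	printf("atomic: %d, process: %d (uid=%u)\n",
	       decode_owner("alice@example", &uid, false),
	       decode_owner("alice@example", &uid, true), uid);
	return 0;
}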
-static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *uid)
+static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs_client *clp, uint32_t *uid, int may_sleep)
{
uint32_t len;
__be32 *p;
@@ -3085,10 +3159,16 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) {
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
- if (len < XDR_MAX_NETOBJ) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!may_sleep) {
+ /* do nothing */
+ } else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_name_to_uid(clp, (char *)p, len, uid) == 0)
ret = NFS_ATTR_FATTR_OWNER;
else
@@ -3101,9 +3181,13 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
}
dprintk("%s: uid=%d\n", __func__, (int)*uid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *gid)
+static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs_client *clp, uint32_t *gid, int may_sleep)
{
uint32_t len;
__be32 *p;
@@ -3113,10 +3197,16 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) {
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
- if (len < XDR_MAX_NETOBJ) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!may_sleep) {
+ /* do nothing */
+ } else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_group_to_gid(clp, (char *)p, len, gid) == 0)
ret = NFS_ATTR_FATTR_GROUP;
else
@@ -3129,6 +3219,9 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
}
dprintk("%s: gid=%d\n", __func__, (int)*gid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev)
@@ -3143,9 +3236,11 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) {
dev_t tmp;
- READ_BUF(8);
- READ32(major);
- READ32(minor);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ major = be32_to_cpup(p++);
+ minor = be32_to_cpup(p);
tmp = MKDEV(major, minor);
if (MAJOR(tmp) == major && MINOR(tmp) == minor)
*rdev = tmp;
@@ -3154,6 +3249,9 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
}
dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3165,12 +3263,17 @@ static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
}
dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3182,12 +3285,17 @@ static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
}
dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3199,12 +3307,17 @@ static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
}
dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used)
@@ -3216,14 +3329,19 @@ static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) {
- READ_BUF(8);
- READ64(*used);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, used);
bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
ret = NFS_ATTR_FATTR_SPACE_USED;
}
dprintk("%s: space used=%Lu\n", __func__,
(unsigned long long)*used);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
@@ -3232,12 +3350,17 @@ static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
uint64_t sec;
uint32_t nsec;
- READ_BUF(12);
- READ64(sec);
- READ32(nsec);
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &sec);
+ nsec = be32_to_cpup(p);
time->tv_sec = (time_t)sec;
time->tv_nsec = (long)nsec;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
@@ -3315,11 +3438,16 @@ static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *c
{
__be32 *p;
- READ_BUF(20);
- READ32(cinfo->atomic);
- READ64(cinfo->before);
- READ64(cinfo->after);
+ p = xdr_inline_decode(xdr, 20);
+ if (unlikely(!p))
+ goto out_overflow;
+ cinfo->atomic = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &cinfo->before);
+ xdr_decode_hyper(p, &cinfo->after);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
@@ -3331,40 +3459,62 @@ static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
status = decode_op_hdr(xdr, OP_ACCESS);
if (status)
return status;
- READ_BUF(8);
- READ32(supp);
- READ32(acc);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ supp = be32_to_cpup(p++);
+ acc = be32_to_cpup(p);
access->supported = supp;
access->access = acc;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
+static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
{
__be32 *p;
+
+ p = xdr_inline_decode(xdr, len);
+ if (likely(p)) {
+ memcpy(buf, p, len);
+ return 0;
+ }
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
+}
+
+static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
+{
int status;
status = decode_op_hdr(xdr, OP_CLOSE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
- return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- return 0;
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+}
+
+static int decode_verifier(struct xdr_stream *xdr, void *verifier)
+{
+ return decode_opaque_fixed(xdr, verifier, 8);
}
static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_COMMIT);
- if (status)
- return status;
- READ_BUF(8);
- COPYMEM(res->verf->verifier, 8);
- return 0;
+ if (!status)
+ status = decode_verifier(xdr, res->verf->verifier);
+ return status;
}
static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -3378,10 +3528,16 @@ static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
return status;
if ((status = decode_change_info(xdr, cinfo)))
return status;
- READ_BUF(4);
- READ32(bmlen);
- READ_BUF(bmlen << 2);
- return 0;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (likely(p))
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
@@ -3466,7 +3622,8 @@ xdr_error:
return status;
}
-static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, const struct nfs_server *server)
+static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
+ const struct nfs_server *server, int may_sleep)
{
__be32 *savep;
uint32_t attrlen,
@@ -3538,12 +3695,14 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, cons
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_owner(xdr, bitmap, server->nfs_client, &fattr->uid);
+ status = decode_attr_owner(xdr, bitmap, server->nfs_client,
+ &fattr->uid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_group(xdr, bitmap, server->nfs_client, &fattr->gid);
+ status = decode_attr_group(xdr, bitmap, server->nfs_client,
+ &fattr->gid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
@@ -3633,14 +3792,21 @@ static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
if (status)
return status;
- READ_BUF(4);
- READ32(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
if (len > NFS4_FHSIZE)
return -EIO;
fh->size = len;
- READ_BUF(len);
- COPYMEM(fh->data, len);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ memcpy(fh->data, p, len);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -3662,10 +3828,12 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
__be32 *p;
uint32_t namelen, type;
- READ_BUF(32);
- READ64(offset);
- READ64(length);
- READ32(type);
+ p = xdr_inline_decode(xdr, 32);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &offset);
+ p = xdr_decode_hyper(p, &length);
+ type = be32_to_cpup(p++);
if (fl != NULL) {
fl->fl_start = (loff_t)offset;
fl->fl_end = fl->fl_start + (loff_t)length - 1;
@@ -3676,23 +3844,27 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
fl->fl_type = F_RDLCK;
fl->fl_pid = 0;
}
- READ64(clientid);
- READ32(namelen);
- READ_BUF(namelen);
- return -NFS4ERR_DENIED;
+ p = xdr_decode_hyper(p, &clientid);
+ namelen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, namelen);
+ if (likely(p))
+ return -NFS4ERR_DENIED;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_LOCK);
if (status == -EIO)
goto out;
if (status == 0) {
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
+ goto out;
} else if (status == -NFS4ERR_DENIED)
status = decode_lock_denied(xdr, NULL);
if (res->open_seqid != NULL)
@@ -3713,16 +3885,13 @@ static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res)
static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_LOCKU);
if (status != -EIO)
nfs_increment_lock_seqid(status, res->seqid);
- if (status == 0) {
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- }
+ if (status == 0)
+ status = decode_stateid(xdr, &res->stateid);
return status;
}
@@ -3737,34 +3906,46 @@ static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
__be32 *p;
uint32_t limit_type, nblocks, blocksize;
- READ_BUF(12);
- READ32(limit_type);
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ limit_type = be32_to_cpup(p++);
switch (limit_type) {
case 1:
- READ64(*maxsize);
+ xdr_decode_hyper(p, maxsize);
break;
case 2:
- READ32(nblocks);
- READ32(blocksize);
+ nblocks = be32_to_cpup(p++);
+ blocksize = be32_to_cpup(p);
*maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
}
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
{
__be32 *p;
uint32_t delegation_type;
+ int status;
- READ_BUF(4);
- READ32(delegation_type);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ delegation_type = be32_to_cpup(p);
if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
res->delegation_type = 0;
return 0;
}
- READ_BUF(NFS4_STATEID_SIZE+4);
- COPYMEM(res->delegation.data, NFS4_STATEID_SIZE);
- READ32(res->do_recall);
+ status = decode_stateid(xdr, &res->delegation);
+ if (unlikely(status))
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->do_recall = be32_to_cpup(p);
switch (delegation_type) {
case NFS4_OPEN_DELEGATE_READ:
@@ -3776,6 +3957,9 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
return -EIO;
}
return decode_ace(xdr, NULL, res->server->nfs_client);
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
@@ -3787,23 +3971,27 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
status = decode_op_hdr(xdr, OP_OPEN);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
decode_change_info(xdr, &res->cinfo);
- READ_BUF(8);
- READ32(res->rflags);
- READ32(bmlen);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->rflags = be32_to_cpup(p++);
+ bmlen = be32_to_cpup(p);
if (bmlen > 10)
goto xdr_error;
- READ_BUF(bmlen << 2);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (unlikely(!p))
+ goto out_overflow;
savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE);
for (i = 0; i < savewords; ++i)
- READ32(res->attrset[i]);
+ res->attrset[i] = be32_to_cpup(p++);
for (; i < NFS4_BITMAP_SIZE; i++)
res->attrset[i] = 0;
@@ -3811,36 +3999,33 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
xdr_error:
dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
- return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- return 0;
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
}
static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
- return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- return 0;
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
}
static int decode_putfh(struct xdr_stream *xdr)
@@ -3863,9 +4048,11 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
status = decode_op_hdr(xdr, OP_READ);
if (status)
return status;
- READ_BUF(8);
- READ32(eof);
- READ32(count);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ eof = be32_to_cpup(p++);
+ count = be32_to_cpup(p);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (count > recvd) {
@@ -3878,6 +4065,9 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
res->eof = eof;
res->count = count;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
@@ -3892,17 +4082,17 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
int status;
status = decode_op_hdr(xdr, OP_READDIR);
- if (status)
+ if (!status)
+ status = decode_verifier(xdr, readdir->verifier.data);
+ if (unlikely(status))
return status;
- READ_BUF(8);
- COPYMEM(readdir->verifier.data, 8);
dprintk("%s: verifier = %08x:%08x\n",
__func__,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1]);
- hdrlen = (char *) p - (char *) iov->iov_base;
+ hdrlen = (char *) xdr->p - (char *) iov->iov_base;
recvd = rcvbuf->len - hdrlen;
if (pglen > recvd)
pglen = recvd;
@@ -3990,8 +4180,10 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
return status;
/* Convert length of symlink */
- READ_BUF(4);
- READ32(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
if (len >= rcvbuf->page_len || len <= 0) {
dprintk("nfs: server returned giant symlink!\n");
return -ENAMETOOLONG;
@@ -4015,6 +4207,9 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
kaddr[len+rcvbuf->page_base] = '\0';
kunmap_atomic(kaddr, KM_USER0);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -4112,10 +4307,16 @@ static int decode_setattr(struct xdr_stream *xdr)
status = decode_op_hdr(xdr, OP_SETATTR);
if (status)
return status;
- READ_BUF(4);
- READ32(bmlen);
- READ_BUF(bmlen << 2);
- return 0;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (likely(p))
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
@@ -4124,35 +4325,50 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
uint32_t opnum;
int32_t nfserr;
- READ_BUF(8);
- READ32(opnum);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
if (opnum != OP_SETCLIENTID) {
dprintk("nfs: decode_setclientid: Server returned operation"
" %d\n", opnum);
return -EIO;
}
- READ32(nfserr);
+ nfserr = be32_to_cpup(p);
if (nfserr == NFS_OK) {
- READ_BUF(8 + NFS4_VERIFIER_SIZE);
- READ64(clp->cl_clientid);
- COPYMEM(clp->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &clp->cl_clientid);
+ memcpy(clp->cl_confirm.data, p, NFS4_VERIFIER_SIZE);
} else if (nfserr == NFSERR_CLID_INUSE) {
uint32_t len;
/* skip netid string */
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
/* skip uaddr string */
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
return -NFSERR_CLID_INUSE;
} else
return nfs4_stat_to_errno(nfserr);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_setclientid_confirm(struct xdr_stream *xdr)
@@ -4169,11 +4385,16 @@ static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res)
if (status)
return status;
- READ_BUF(16);
- READ32(res->count);
- READ32(res->verf->committed);
- COPYMEM(res->verf->verifier, 8);
+ p = xdr_inline_decode(xdr, 16);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->count = be32_to_cpup(p++);
+ res->verf->committed = be32_to_cpup(p++);
+ memcpy(res->verf->verifier, p, 8);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_delegreturn(struct xdr_stream *xdr)
@@ -4187,6 +4408,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
{
__be32 *p;
uint32_t dummy;
+ char *dummy_str;
int status;
struct nfs_client *clp = res->client;
@@ -4194,36 +4416,45 @@ static int decode_exchange_id(struct xdr_stream *xdr,
if (status)
return status;
- READ_BUF(8);
- READ64(clp->cl_ex_clid);
- READ_BUF(12);
- READ32(clp->cl_seqid);
- READ32(clp->cl_exchange_flags);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &clp->cl_ex_clid);
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ clp->cl_seqid = be32_to_cpup(p++);
+ clp->cl_exchange_flags = be32_to_cpup(p++);
/* We ask for SP4_NONE */
- READ32(dummy);
+ dummy = be32_to_cpup(p);
if (dummy != SP4_NONE)
return -EIO;
/* Throw away minor_id */
- READ_BUF(8);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
/* Throw away Major id */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy);
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
/* Throw away server_scope */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy);
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
/* Throw away Implementation id array */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy);
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_chan_attrs(struct xdr_stream *xdr,
@@ -4232,22 +4463,35 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
__be32 *p;
u32 nr_attrs;
- READ_BUF(28);
- READ32(attrs->headerpadsz);
- READ32(attrs->max_rqst_sz);
- READ32(attrs->max_resp_sz);
- READ32(attrs->max_resp_sz_cached);
- READ32(attrs->max_ops);
- READ32(attrs->max_reqs);
- READ32(nr_attrs);
+ p = xdr_inline_decode(xdr, 28);
+ if (unlikely(!p))
+ goto out_overflow;
+ attrs->headerpadsz = be32_to_cpup(p++);
+ attrs->max_rqst_sz = be32_to_cpup(p++);
+ attrs->max_resp_sz = be32_to_cpup(p++);
+ attrs->max_resp_sz_cached = be32_to_cpup(p++);
+ attrs->max_ops = be32_to_cpup(p++);
+ attrs->max_reqs = be32_to_cpup(p++);
+ nr_attrs = be32_to_cpup(p);
if (unlikely(nr_attrs > 1)) {
printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n",
__func__, nr_attrs);
return -EINVAL;
}
- if (nr_attrs == 1)
- READ_BUF(4); /* skip rdma_attrs */
+ if (nr_attrs == 1) {
+ p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */
+ if (unlikely(!p))
+ goto out_overflow;
+ }
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
+{
+ return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
}
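
With decode_opaque_fixed() factored out above, every fixed-size opaque in the protocol (stateids, verifiers, session ids) reduces to a one-line wrapper. A sketch of how a further fixed-size field would be added (decode_my_verifier() and struct my_res are hypothetical; decode_opaque_fixed() and NFS4_VERIFIER_SIZE appear in this patch):

/* Hypothetical wrapper in the style of decode_stateid()/decode_sessionid() */
static int decode_my_verifier(struct xdr_stream *xdr, struct my_res *res)
{
	/* copies NFS4_VERIFIER_SIZE bytes, or prints the overflow and returns -EIO */
	return decode_opaque_fixed(xdr, res->verifier, NFS4_VERIFIER_SIZE);
}
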
static int decode_create_session(struct xdr_stream *xdr,
@@ -4259,24 +4503,26 @@ static int decode_create_session(struct xdr_stream *xdr,
struct nfs4_session *session = clp->cl_session;
status = decode_op_hdr(xdr, OP_CREATE_SESSION);
-
- if (status)
+ if (!status)
+ status = decode_sessionid(xdr, &session->sess_id);
+ if (unlikely(status))
return status;
- /* sessionid */
- READ_BUF(NFS4_MAX_SESSIONID_LEN);
- COPYMEM(&session->sess_id, NFS4_MAX_SESSIONID_LEN);
-
/* seqid, flags */
- READ_BUF(8);
- READ32(clp->cl_seqid);
- READ32(session->flags);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ clp->cl_seqid = be32_to_cpup(p++);
+ session->flags = be32_to_cpup(p);
/* Channel attributes */
status = decode_chan_attrs(xdr, &session->fc_attrs);
if (!status)
status = decode_chan_attrs(xdr, &session->bc_attrs);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
@@ -4300,7 +4546,9 @@ static int decode_sequence(struct xdr_stream *xdr,
return 0;
status = decode_op_hdr(xdr, OP_SEQUENCE);
- if (status)
+ if (!status)
+ status = decode_sessionid(xdr, &id);
+ if (unlikely(status))
goto out_err;
/*
@@ -4309,36 +4557,43 @@ static int decode_sequence(struct xdr_stream *xdr,
*/
status = -ESERVERFAULT;
- slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
- READ_BUF(NFS4_MAX_SESSIONID_LEN + 20);
- COPYMEM(id.data, NFS4_MAX_SESSIONID_LEN);
if (memcmp(id.data, res->sr_session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
dprintk("%s Invalid session id\n", __func__);
goto out_err;
}
+
+ p = xdr_inline_decode(xdr, 20);
+ if (unlikely(!p))
+ goto out_overflow;
+
/* seqid */
- READ32(dummy);
+ slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
+ dummy = be32_to_cpup(p++);
if (dummy != slot->seq_nr) {
dprintk("%s Invalid sequence number\n", __func__);
goto out_err;
}
/* slot id */
- READ32(dummy);
+ dummy = be32_to_cpup(p++);
if (dummy != res->sr_slotid) {
dprintk("%s Invalid slot id\n", __func__);
goto out_err;
}
/* highest slot id - currently not processed */
- READ32(dummy);
+ dummy = be32_to_cpup(p++);
/* target highest slot id - currently not processed */
- READ32(dummy);
+ dummy = be32_to_cpup(p++);
/* result flags - currently not processed */
- READ32(dummy);
+ dummy = be32_to_cpup(p);
status = 0;
out_err:
res->sr_status = status;
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ status = -EIO;
+ goto out_err;
#else /* CONFIG_NFS_V4_1 */
return 0;
#endif /* CONFIG_NFS_V4_1 */
@@ -4370,7 +4625,8 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct
status = decode_open_downgrade(&xdr, res);
if (status != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4397,7 +4653,8 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_ac
status = decode_access(&xdr, res);
if (status != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4424,7 +4681,8 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lo
goto out;
if ((status = decode_getfh(&xdr, res->fh)) != 0)
goto out;
- status = decode_getfattr(&xdr, res->fattr, res->server);
+ status = decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4448,7 +4706,8 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nf
if ((status = decode_putrootfh(&xdr)) != 0)
goto out;
if ((status = decode_getfh(&xdr, res->fh)) == 0)
- status = decode_getfattr(&xdr, res->fattr, res->server);
+ status = decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4473,7 +4732,8 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem
goto out;
if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
goto out;
- decode_getfattr(&xdr, &res->dir_attr, res->server);
+ decode_getfattr(&xdr, &res->dir_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4503,11 +4763,13 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_re
if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
goto out;
/* Current FH is target directory */
- if (decode_getfattr(&xdr, res->new_fattr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->new_fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if ((status = decode_restorefh(&xdr)) != 0)
goto out;
- decode_getfattr(&xdr, res->old_fattr, res->server);
+ decode_getfattr(&xdr, res->old_fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4540,11 +4802,13 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
- if (decode_getfattr(&xdr, res->dir_attr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->dir_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if ((status = decode_restorefh(&xdr)) != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4573,11 +4837,13 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_cr
goto out;
if ((status = decode_getfh(&xdr, res->fh)) != 0)
goto out;
- if (decode_getfattr(&xdr, res->fattr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if ((status = decode_restorefh(&xdr)) != 0)
goto out;
- decode_getfattr(&xdr, res->dir_fattr, res->server);
+ decode_getfattr(&xdr, res->dir_fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4609,7 +4875,8 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_g
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_getfattr(&xdr, res->fattr, res->server);
+ status = decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4716,7 +4983,8 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos
* an ESTALE error. Shouldn't be a problem,
* though, since fattr->valid will remain unset.
*/
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4748,11 +5016,13 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openr
goto out;
if (decode_getfh(&xdr, &res->fh) != 0)
goto out;
- if (decode_getfattr(&xdr, res->f_attr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->f_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if (decode_restorefh(&xdr) != 0)
goto out;
- decode_getfattr(&xdr, res->dir_attr, res->server);
+ decode_getfattr(&xdr, res->dir_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4800,7 +5070,8 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nf
status = decode_open(&xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->f_attr, res->server);
+ decode_getfattr(&xdr, res->f_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4827,7 +5098,8 @@ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_se
status = decode_setattr(&xdr);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5001,7 +5273,8 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writ
status = decode_write(&xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
if (!status)
status = res->count;
out:
@@ -5030,7 +5303,8 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_wri
status = decode_commit(&xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5194,7 +5468,8 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nf
if (status != 0)
goto out;
status = decode_delegreturn(&xdr);
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5222,7 +5497,8 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
goto out;
xdr_enter_page(&xdr, PAGE_SIZE);
status = decode_getfattr(&xdr, &res->fs_locations->fattr,
- res->fs_locations->server);
+ res->fs_locations->server,
+ !RPC_IS_ASYNC(req->rq_task));
out:
return status;
}
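
One pattern repeats through the nfs4_xdr_dec_*() functions above: decode_getfattr() gains a may_sleep argument, passed as !RPC_IS_ASYNC(rqstp->rq_task), so that decoding on an asynchronous (rpciod) task skips the uid/gid idmapper upcalls, which may block. Condensed from decode_attr_owner() earlier in this patch:

	if (!may_sleep) {
		/* async decode: leave the uid unmapped rather than risk
		 * sleeping in rpciod context */
	} else if (len < XDR_MAX_NETOBJ) {
		if (nfs_map_name_to_uid(clp, (char *)p, len, uid) == 0)
			ret = NFS_ATTR_FATTR_OWNER;
	}
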
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 96c4ebf..12c9e66 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -18,7 +18,6 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
-#include <linux/smp_lock.h>
#include <asm/system.h>
@@ -61,17 +60,15 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
return p;
}
-static void nfs_readdata_free(struct nfs_read_data *p)
+void nfs_readdata_free(struct nfs_read_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_rdata_mempool);
}
-void nfs_readdata_release(void *data)
+static void nfs_readdata_release(struct nfs_read_data *rdata)
{
- struct nfs_read_data *rdata = data;
-
put_nfs_open_context(rdata->args.context);
nfs_readdata_free(rdata);
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0b4cbdc..9c85cdb 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -158,7 +158,7 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_mountvers, "mountvers=%s" },
{ Opt_nfsvers, "nfsvers=%s" },
{ Opt_nfsvers, "vers=%s" },
- { Opt_minorversion, "minorversion=%u" },
+ { Opt_minorversion, "minorversion=%s" },
{ Opt_sec, "sec=%s" },
{ Opt_proto, "proto=%s" },
@@ -742,129 +742,10 @@ static int nfs_verify_server_address(struct sockaddr *addr)
}
}
+ dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
return 0;
}
-static void nfs_parse_ipv4_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
-{
- struct sockaddr_in *sin = (struct sockaddr_in *)sap;
- u8 *addr = (u8 *)&sin->sin_addr.s_addr;
-
- if (str_len <= INET_ADDRSTRLEN) {
- dfprintk(MOUNT, "NFS: parsing IPv4 address %*s\n",
- (int)str_len, string);
-
- sin->sin_family = AF_INET;
- *addr_len = sizeof(*sin);
- if (in4_pton(string, str_len, addr, '\0', NULL))
- return;
- }
-
- sap->sa_family = AF_UNSPEC;
- *addr_len = 0;
-}
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static int nfs_parse_ipv6_scope_id(const char *string, const size_t str_len,
- const char *delim,
- struct sockaddr_in6 *sin6)
-{
- char *p;
- size_t len;
-
- if ((string + str_len) == delim)
- return 1;
-
- if (*delim != IPV6_SCOPE_DELIMITER)
- return 0;
-
- if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
- return 0;
-
- len = (string + str_len) - delim - 1;
- p = kstrndup(delim + 1, len, GFP_KERNEL);
- if (p) {
- unsigned long scope_id = 0;
- struct net_device *dev;
-
- dev = dev_get_by_name(&init_net, p);
- if (dev != NULL) {
- scope_id = dev->ifindex;
- dev_put(dev);
- } else {
- if (strict_strtoul(p, 10, &scope_id) == 0) {
- kfree(p);
- return 0;
- }
- }
-
- kfree(p);
-
- sin6->sin6_scope_id = scope_id;
- dfprintk(MOUNT, "NFS: IPv6 scope ID = %lu\n", scope_id);
- return 1;
- }
-
- return 0;
-}
-
-static void nfs_parse_ipv6_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
-{
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
- u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
- const char *delim;
-
- if (str_len <= INET6_ADDRSTRLEN) {
- dfprintk(MOUNT, "NFS: parsing IPv6 address %*s\n",
- (int)str_len, string);
-
- sin6->sin6_family = AF_INET6;
- *addr_len = sizeof(*sin6);
- if (in6_pton(string, str_len, addr,
- IPV6_SCOPE_DELIMITER, &delim) != 0) {
- if (nfs_parse_ipv6_scope_id(string, str_len,
- delim, sin6) != 0)
- return;
- }
- }
-
- sap->sa_family = AF_UNSPEC;
- *addr_len = 0;
-}
-#else
-static void nfs_parse_ipv6_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
-{
- sap->sa_family = AF_UNSPEC;
- *addr_len = 0;
-}
-#endif
-
-/*
- * Construct a sockaddr based on the contents of a string that contains
- * an IP address in presentation format.
- *
- * If there is a problem constructing the new sockaddr, set the address
- * family to AF_UNSPEC.
- */
-void nfs_parse_ip_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
-{
- unsigned int i, colons;
-
- colons = 0;
- for (i = 0; i < str_len; i++)
- if (string[i] == ':')
- colons++;
-
- if (colons >= 2)
- nfs_parse_ipv6_address(string, str_len, sap, addr_len);
- else
- nfs_parse_ipv4_address(string, str_len, sap, addr_len);
-}
-
/*
* Sanity check the NFS transport protocol.
*
@@ -904,8 +785,6 @@ static void nfs_set_mount_transport_protocol(struct nfs_parsed_mount_data *mnt)
/*
* Parse the value of the 'sec=' option.
- *
- * The flavor_len setting is for v4 mounts.
*/
static int nfs_parse_security_flavors(char *value,
struct nfs_parsed_mount_data *mnt)
@@ -916,53 +795,43 @@ static int nfs_parse_security_flavors(char *value,
switch (match_token(value, nfs_secflavor_tokens, args)) {
case Opt_sec_none:
- mnt->auth_flavor_len = 0;
mnt->auth_flavors[0] = RPC_AUTH_NULL;
break;
case Opt_sec_sys:
- mnt->auth_flavor_len = 0;
mnt->auth_flavors[0] = RPC_AUTH_UNIX;
break;
case Opt_sec_krb5:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5;
break;
case Opt_sec_krb5i:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5I;
break;
case Opt_sec_krb5p:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5P;
break;
case Opt_sec_lkey:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEY;
break;
case Opt_sec_lkeyi:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYI;
break;
case Opt_sec_lkeyp:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYP;
break;
case Opt_sec_spkm:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKM;
break;
case Opt_sec_spkmi:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMI;
break;
case Opt_sec_spkmp:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMP;
break;
default:
return 0;
}
+ mnt->auth_flavor_len = 1;
return 1;
}
@@ -1001,7 +870,6 @@ static int nfs_parse_mount_options(char *raw,
while ((p = strsep(&raw, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
unsigned long option;
- int int_option;
int token;
if (!*p)
@@ -1273,11 +1141,16 @@ static int nfs_parse_mount_options(char *raw,
}
break;
case Opt_minorversion:
- if (match_int(args, &int_option))
- return 0;
- if (int_option < 0 || int_option > NFS4_MAX_MINOR_VERSION)
- return 0;
- mnt->minorversion = int_option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ if (option > NFS4_MAX_MINOR_VERSION)
+ goto out_invalid_value;
+ mnt->minorversion = option;
break;
/*
@@ -1352,11 +1225,14 @@ static int nfs_parse_mount_options(char *raw,
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
- nfs_parse_ip_address(string, strlen(string),
- (struct sockaddr *)
- &mnt->nfs_server.address,
- &mnt->nfs_server.addrlen);
+ mnt->nfs_server.addrlen =
+ rpc_pton(string, strlen(string),
+ (struct sockaddr *)
+ &mnt->nfs_server.address,
+ sizeof(mnt->nfs_server.address));
kfree(string);
+ if (mnt->nfs_server.addrlen == 0)
+ goto out_invalid_address;
break;
case Opt_clientaddr:
string = match_strdup(args);
@@ -1376,11 +1252,14 @@ static int nfs_parse_mount_options(char *raw,
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
- nfs_parse_ip_address(string, strlen(string),
- (struct sockaddr *)
- &mnt->mount_server.address,
- &mnt->mount_server.addrlen);
+ mnt->mount_server.addrlen =
+ rpc_pton(string, strlen(string),
+ (struct sockaddr *)
+ &mnt->mount_server.address,
+ sizeof(mnt->mount_server.address));
kfree(string);
+ if (mnt->mount_server.addrlen == 0)
+ goto out_invalid_address;
break;
case Opt_lookupcache:
string = match_strdup(args);
@@ -1432,8 +1311,11 @@ static int nfs_parse_mount_options(char *raw,
return 1;
+out_invalid_address:
+ printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
+ return 0;
out_invalid_value:
- printk(KERN_INFO "NFS: bad mount option value specified: %s \n", p);
+ printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
return 0;
out_nomem:
printk(KERN_INFO "NFS: not enough memory to parse option\n");
@@ -1445,13 +1327,50 @@ out_security_failure:
}
/*
+ * Match the requested auth flavors with the list returned by
+ * the server. Returns zero and sets the mount's authentication
+ * flavor on success; returns -EACCES if server does not support
+ * the requested flavor.
+ */
+static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
+ struct nfs_mount_request *request)
+{
+ unsigned int i, j, server_authlist_len = *(request->auth_flav_len);
+
+ /*
+ * We avoid sophisticated negotiating here, as there are
+ * plenty of cases where we can get it wrong, providing
+ * either too little or too much security.
+ *
+ * RFC 2623, section 2.7 suggests we SHOULD prefer the
+ * flavor listed first. However, some servers list
+ * AUTH_NULL first. Our caller plants AUTH_SYS, the
+ * preferred default, in args->auth_flavors[0] if the user
+ * didn't specify a sec= mount option.
+ */
+ for (i = 0; i < args->auth_flavor_len; i++)
+ for (j = 0; j < server_authlist_len; j++)
+ if (args->auth_flavors[i] == request->auth_flavs[j]) {
+ dfprintk(MOUNT, "NFS: using auth flavor %d\n",
+ request->auth_flavs[j]);
+ args->auth_flavors[0] = request->auth_flavs[j];
+ return 0;
+ }
+
+ dfprintk(MOUNT, "NFS: server does not support requested auth flavor\n");
+ nfs_umount(request);
+ return -EACCES;
+}
+
+/*
* Use the remote server's MOUNT service to request the NFS file handle
* corresponding to the provided path.
*/
static int nfs_try_mount(struct nfs_parsed_mount_data *args,
struct nfs_fh *root_fh)
{
- unsigned int auth_flavor_len = 0;
+ rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
+ unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
struct nfs_mount_request request = {
.sap = (struct sockaddr *)
&args->mount_server.address,
@@ -1459,7 +1378,8 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
.protocol = args->mount_server.protocol,
.fh = root_fh,
.noresvport = args->flags & NFS_MOUNT_NORESVPORT,
- .auth_flav_len = &auth_flavor_len,
+ .auth_flav_len = &server_authlist_len,
+ .auth_flavs = server_authlist,
};
int status;
@@ -1489,19 +1409,25 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
/*
* autobind will be used if mount_server.port == 0
*/
- nfs_set_port(request.sap, args->mount_server.port);
+ rpc_set_port(request.sap, args->mount_server.port);
/*
* Now ask the mount server to map our export path
* to a file handle.
*/
status = nfs_mount(&request);
- if (status == 0)
- return 0;
+ if (status != 0) {
+ dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
+ request.hostname, status);
+ return status;
+ }
- dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
- request.hostname, status);
- return status;
+ /*
+ * MNTv1 (NFSv2) does not support auth flavor negotiation.
+ */
+ if (args->mount_server.version != NFS_MNT3_VERSION)
+ return 0;
+ return nfs_walk_authlist(args, &request);
}
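
To make the selection order in nfs_walk_authlist() concrete: the outer loop runs over the client's preferences, so the client's first acceptable flavor wins regardless of the server's ordering. A standalone illustration with made-up flavor lists (the nested loop mirrors the kernel code above):

#include <stdio.h>

int main(void)
{
	/* caller's default per nfs_validate_mount_data(): RPC_AUTH_UNIX (1) */
	unsigned int client[] = { 1 };
	/* a server that lists AUTH_NULL (0) before AUTH_UNIX (1) */
	unsigned int server[] = { 0, 1 };
	unsigned int i, j;

	for (i = 0; i < sizeof(client) / sizeof(client[0]); i++)
		for (j = 0; j < sizeof(server) / sizeof(server[0]); j++)
			if (client[i] == server[j]) {
				/* picks AUTH_UNIX even though AUTH_NULL is listed first */
				printf("using auth flavor %u\n", server[j]);
				return 0;
			}
	/* no match: the kernel path calls nfs_umount() and returns -EACCES */
	return 1;
}
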
static int nfs_parse_simple_hostname(const char *dev_name,
@@ -1676,6 +1602,7 @@ static int nfs_validate_mount_data(void *options,
args->nfs_server.port = 0; /* autobind unless user sets port */
args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
args->auth_flavors[0] = RPC_AUTH_UNIX;
+ args->auth_flavor_len = 1;
switch (data->version) {
case 1:
@@ -1776,7 +1703,7 @@ static int nfs_validate_mount_data(void *options,
&args->nfs_server.address))
goto out_no_address;
- nfs_set_port((struct sockaddr *)&args->nfs_server.address,
+ rpc_set_port((struct sockaddr *)&args->nfs_server.address,
args->nfs_server.port);
nfs_set_mount_transport_protocol(args);
@@ -2339,7 +2266,7 @@ static int nfs4_validate_mount_data(void *options,
args->acdirmax = NFS_DEF_ACDIRMAX;
args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
args->auth_flavors[0] = RPC_AUTH_UNIX;
- args->auth_flavor_len = 0;
+ args->auth_flavor_len = 1;
args->minorversion = 0;
switch (data->version) {
@@ -2409,7 +2336,7 @@ static int nfs4_validate_mount_data(void *options,
&args->nfs_server.address))
return -EINVAL;
- nfs_set_port((struct sockaddr *)&args->nfs_server.address,
+ rpc_set_port((struct sockaddr *)&args->nfs_server.address,
args->nfs_server.port);
nfs_validate_transport_protocol(args);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ce72882..120acad 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -13,6 +13,7 @@
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
+#include <linux/migrate.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
@@ -26,6 +27,7 @@
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -87,17 +89,15 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
return p;
}
-static void nfs_writedata_free(struct nfs_write_data *p)
+void nfs_writedata_free(struct nfs_write_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_wdata_mempool);
}
-void nfs_writedata_release(void *data)
+static void nfs_writedata_release(struct nfs_write_data *wdata)
{
- struct nfs_write_data *wdata = data;
-
put_nfs_open_context(wdata->args.context);
nfs_writedata_free(wdata);
}
@@ -202,8 +202,10 @@ static int nfs_set_page_writeback(struct page *page)
struct nfs_server *nfss = NFS_SERVER(inode);
if (atomic_long_inc_return(&nfss->writeback) >
- NFS_CONGESTION_ON_THRESH)
- set_bdi_congested(&nfss->backing_dev_info, WRITE);
+ NFS_CONGESTION_ON_THRESH) {
+ set_bdi_congested(&nfss->backing_dev_info,
+ BLK_RW_ASYNC);
+ }
}
return ret;
}
@@ -215,27 +217,20 @@ static void nfs_end_page_writeback(struct page *page)
end_page_writeback(page);
if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
- clear_bdi_congested(&nfss->backing_dev_info, WRITE);
+ clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
-/*
- * Find an associated nfs write request, and prepare to flush it out
- * May return an error if the user signalled nfs_wait_on_request().
- */
-static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
- struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page)
{
struct inode *inode = page->mapping->host;
struct nfs_page *req;
int ret;
spin_lock(&inode->i_lock);
- for(;;) {
+ for (;;) {
req = nfs_page_find_request_locked(page);
- if (req == NULL) {
- spin_unlock(&inode->i_lock);
- return 0;
- }
+ if (req == NULL)
+ break;
if (nfs_set_page_tag_locked(req))
break;
/* Note: If we hold the page lock, as is the case in nfs_writepage,
@@ -247,23 +242,40 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
ret = nfs_wait_on_request(req);
nfs_release_request(req);
if (ret != 0)
- return ret;
+ return ERR_PTR(ret);
spin_lock(&inode->i_lock);
}
- if (test_bit(PG_CLEAN, &req->wb_flags)) {
- spin_unlock(&inode->i_lock);
- BUG();
- }
- if (nfs_set_page_writeback(page) != 0) {
- spin_unlock(&inode->i_lock);
- BUG();
- }
spin_unlock(&inode->i_lock);
+ return req;
+}
+
+/*
+ * Find an associated nfs write request, and prepare to flush it out
+ * May return an error if the user signalled nfs_wait_on_request().
+ */
+static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+ struct page *page)
+{
+ struct nfs_page *req;
+ int ret = 0;
+
+ req = nfs_find_and_lock_request(page);
+ if (!req)
+ goto out;
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+
+ ret = nfs_set_page_writeback(page);
+ BUG_ON(ret != 0);
+ BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
+
if (!nfs_pageio_add_request(pgio, req)) {
nfs_redirty_request(req);
- return pgio->pg_error;
+ ret = pgio->pg_error;
}
- return 0;
+out:
+ return ret;
}
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
@@ -1580,6 +1592,41 @@ int nfs_wb_page(struct inode *inode, struct page* page)
return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
+#ifdef CONFIG_MIGRATION
+int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page)
+{
+ struct nfs_page *req;
+ int ret;
+
+ if (PageFsCache(page))
+ nfs_fscache_release_page(page, GFP_KERNEL);
+
+ req = nfs_find_and_lock_request(page);
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+
+ ret = migrate_page(mapping, newpage, page);
+ if (!req)
+ goto out;
+ if (ret)
+ goto out_unlock;
+ page_cache_get(newpage);
+ req->wb_page = newpage;
+ SetPagePrivate(newpage);
+ set_page_private(newpage, page_private(page));
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ page_cache_release(page);
+out_unlock:
+ nfs_clear_page_tag_locked(req);
+ nfs_release_request(req);
+out:
+ return ret;
+}
+#endif
+
int __init nfs_init_writepagecache(void)
{
nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
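
nfs_migrate_page() above only supplies the callback body: it re-homes the nfs_page (wb_page, PagePrivate, page_private) from the old page to the new one around the generic migrate_page() copy. It presumably gets wired into the NFS address_space_operations elsewhere in this series, along these lines (the initializer below is a sketch, not taken from this hunk):

static const struct address_space_operations nfs_file_aops_sketch = {
	/* ...the existing read/write hooks... */
#ifdef CONFIG_MIGRATION
	.migratepage	= nfs_migrate_page,
#endif
};
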
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index b92a276..d946264 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -85,6 +85,11 @@ static void expkey_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
+static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
+}
+
static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
static struct cache_detail svc_expkey_cache;
@@ -259,7 +264,7 @@ static struct cache_detail svc_expkey_cache = {
.hash_table = expkey_table,
.name = "nfsd.fh",
.cache_put = expkey_put,
- .cache_request = expkey_request,
+ .cache_upcall = expkey_upcall,
.cache_parse = expkey_parse,
.cache_show = expkey_show,
.match = expkey_match,
@@ -355,6 +360,11 @@ static void svc_export_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
+static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, svc_export_request);
+}
+
static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
@@ -724,7 +734,7 @@ struct cache_detail svc_export_cache = {
.hash_table = export_table,
.name = "nfsd.export",
.cache_put = svc_export_put,
- .cache_request = svc_export_request,
+ .cache_upcall = svc_export_upcall,
.cache_parse = svc_export_parse,
.cache_show = svc_export_show,
.match = svc_export_match,
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 5b39842..cdfa86f 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -146,6 +146,12 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
}
static int
+idtoname_upcall(struct cache_detail *cd, struct cache_head *ch)
+{
+ return sunrpc_cache_pipe_upcall(cd, ch, idtoname_request);
+}
+
+static int
idtoname_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
@@ -175,10 +181,10 @@ idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
}
static void
-warn_no_idmapd(struct cache_detail *detail)
+warn_no_idmapd(struct cache_detail *detail, int has_died)
{
printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n",
- detail->last_close? "died" : "not been started");
+ has_died ? "died" : "not been started");
}
@@ -192,7 +198,7 @@ static struct cache_detail idtoname_cache = {
.hash_table = idtoname_table,
.name = "nfs4.idtoname",
.cache_put = ent_put,
- .cache_request = idtoname_request,
+ .cache_upcall = idtoname_upcall,
.cache_parse = idtoname_parse,
.cache_show = idtoname_show,
.warn_no_listener = warn_no_idmapd,
@@ -325,6 +331,12 @@ nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
}
static int
+nametoid_upcall(struct cache_detail *cd, struct cache_head *ch)
+{
+ return sunrpc_cache_pipe_upcall(cd, ch, nametoid_request);
+}
+
+static int
nametoid_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
@@ -363,7 +375,7 @@ static struct cache_detail nametoid_cache = {
.hash_table = nametoid_table,
.name = "nfs4.nametoid",
.cache_put = ent_put,
- .cache_request = nametoid_request,
+ .cache_upcall = nametoid_upcall,
.cache_parse = nametoid_parse,
.cache_show = nametoid_show,
.warn_no_listener = warn_no_idmapd,
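
The fs/nfsd conversions above all follow one template: each cache keeps its existing request formatter and grows a thin .cache_upcall hook that hands the formatter to sunrpc_cache_pipe_upcall(), so the sunrpc cache core no longer calls the request builder directly. The template, with hypothetical names:

static int example_upcall(struct cache_detail *cd, struct cache_head *h)
{
	/* delegate the wire formatting to the cache's request builder */
	return sunrpc_cache_pipe_upcall(cd, h, example_request);
}

static struct cache_detail example_cache = {
	/* ... */
	.cache_upcall	= example_upcall,
	/* ... */
};
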
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index b51e7ae..b764d7d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/string.h>
-#include <linux/smp_lock.h>
#include <linux/ctype.h>
#include <linux/nfs.h>
@@ -38,6 +37,7 @@
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/lockd.h>
+#include <linux/sunrpc/clnt.h>
#include <asm/uaccess.h>
#include <net/ipv6.h>
@@ -491,22 +491,18 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
*
* Input:
* buf: '\n'-terminated C string containing a
- * presentation format IPv4 address
+ * presentation format IP address
* size: length of C string in @buf
* Output:
* On success: returns zero if all specified locks were released;
* returns one if one or more locks were not released
* On error: return code is negative errno value
- *
- * Note: Only AF_INET client addresses are passed in
*/
static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
{
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- };
- int b1, b2, b3, b4;
- char c;
+ struct sockaddr_storage address;
+ struct sockaddr *sap = (struct sockaddr *)&address;
+ size_t salen = sizeof(address);
char *fo_path;
/* sanity check */
@@ -520,14 +516,10 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
if (qword_get(&buf, fo_path, size) < 0)
return -EINVAL;
- /* get ipv4 address */
- if (sscanf(fo_path, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
- return -EINVAL;
- if (b1 > 255 || b2 > 255 || b3 > 255 || b4 > 255)
+ if (rpc_pton(fo_path, size, sap, salen) == 0)
return -EINVAL;
- sin.sin_addr.s_addr = htonl((b1 << 24) | (b2 << 16) | (b3 << 8) | b4);
- return nlmsvc_unlock_all_by_ip((struct sockaddr *)&sin);
+ return nlmsvc_unlock_all_by_ip(sap);
}
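
The conversion above swaps a hand-rolled sscanf() IPv4 parser for rpc_pton(), which handles both IPv4 and IPv6 presentation addresses and returns the resulting sockaddr length, or 0 on a parse failure. Usage shape, as seen here and in the fs/nfs/super.c hunks (the literal address is illustrative):

	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	size_t salen;

	/* rpc_pton() fills in *sap and returns its length; 0 means bad input */
	salen = rpc_pton("192.0.2.1", strlen("192.0.2.1"), sap, sizeof(address));
	if (salen == 0)
		return -EINVAL;
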
/**
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 5a280a9..d68cd05 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -18,7 +18,6 @@
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/freezer.h>
#include <linux/fs_struct.h>
#include <linux/kthread.h>
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 4145083..23341c1 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -678,7 +678,6 @@ __be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
int access, struct file **filp)
{
- const struct cred *cred = current_cred();
struct dentry *dentry;
struct inode *inode;
int flags = O_RDONLY|O_LARGEFILE;
@@ -733,7 +732,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
vfs_dq_init(inode);
}
*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
- flags, cred);
+ flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
else
diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig
new file mode 100644
index 0000000..72da095
--- /dev/null
+++ b/fs/nilfs2/Kconfig
@@ -0,0 +1,25 @@
+config NILFS2_FS
+ tristate "NILFS2 file system support (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ select CRC32
+ help
+ NILFS2 is a log-structured file system (LFS) supporting continuous
+ snapshotting. In addition to versioning the entire file system, it
+ lets users restore files that were mistakenly overwritten or
+ destroyed just a few seconds ago. Because the file system maintains
+ consistency like a conventional LFS, it recovers quickly after
+ system crashes.
+
+ NILFS2 creates a number of checkpoints every few seconds or on a
+ per-synchronous-write basis (unless there is no change). Users can
+ select significant versions among continuously created checkpoints,
+ and can change them into snapshots which will be preserved for long
+ periods until they are changed back to checkpoints. Each
+ snapshot is mountable as a read-only file system concurrently with
+ its writable mount, and this feature is convenient for online backup.
+
+ Some features, including atime, extended attributes, and POSIX ACLs,
+ are not supported yet.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called nilfs2. If unsure, say N.
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 36df60b..99d58a0 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -568,6 +568,7 @@ void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap,
}
static struct lock_class_key nilfs_bmap_dat_lock_key;
+static struct lock_class_key nilfs_bmap_mdt_lock_key;
/**
* nilfs_bmap_read - read a bmap from an inode
@@ -603,7 +604,11 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
bmap->b_ptr_type = NILFS_BMAP_PTR_VS;
bmap->b_last_allocated_key = 0;
bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
+ lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
break;
+ case NILFS_IFILE_INO:
+ lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
+ /* Fall through */
default:
bmap->b_ptr_type = NILFS_BMAP_PTR_VM;
bmap->b_last_allocated_key = 0;
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 7d49813..aec942c 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -307,7 +307,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
if (ret < 0) {
if (ret != -ENOENT)
- goto out_header;
+ break;
/* skip hole */
ret = 0;
continue;
@@ -340,7 +340,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
continue;
printk(KERN_ERR "%s: cannot delete block\n",
__func__);
- goto out_header;
+ break;
}
}
@@ -358,7 +358,6 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
kunmap_atomic(kaddr, KM_USER0);
}
- out_header:
brelse(header_bh);
out_sem:
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 0b2710e..8927ca2 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -134,15 +134,6 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
- if (entry->de_blocknr != cpu_to_le64(0) ||
- entry->de_end != cpu_to_le64(NILFS_CNO_MAX)) {
- printk(KERN_CRIT
- "%s: vbn = %llu, start = %llu, end = %llu, pbn = %llu\n",
- __func__, (unsigned long long)req->pr_entry_nr,
- (unsigned long long)le64_to_cpu(entry->de_start),
- (unsigned long long)le64_to_cpu(entry->de_end),
- (unsigned long long)le64_to_cpu(entry->de_blocknr));
- }
entry->de_blocknr = cpu_to_le64(blocknr);
kunmap_atomic(kaddr, KM_USER0);
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 54100ac..1a4fa04 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -43,7 +43,6 @@
*/
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include "nilfs.h"
#include "page.h"
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 3d3ddb3..2dfd477 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -412,8 +412,10 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
return 0; /* Do not request flush for shadow page cache */
if (!sb) {
writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs);
- if (!writer)
+ if (!writer) {
+ nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
return -EROFS;
+ }
sb = writer->s_super;
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index aa97754..51ff3d0 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1829,26 +1829,13 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci,
err = nilfs_segbuf_write(segbuf, &wi);
res = nilfs_segbuf_wait(segbuf, &wi);
- err = unlikely(err) ? : res;
- if (unlikely(err))
+ err = err ? : res;
+ if (err)
return err;
}
return 0;
}
-static int nilfs_page_has_uncleared_buffer(struct page *page)
-{
- struct buffer_head *head, *bh;
-
- head = bh = page_buffers(page);
- do {
- if (buffer_dirty(bh) && !list_empty(&bh->b_assoc_buffers))
- return 1;
- bh = bh->b_this_page;
- } while (bh != head);
- return 0;
-}
-
static void __nilfs_end_page_io(struct page *page, int err)
{
if (!err) {
@@ -1872,13 +1859,26 @@ static void nilfs_end_page_io(struct page *page, int err)
if (!page)
return;
- if (buffer_nilfs_node(page_buffers(page)) &&
- nilfs_page_has_uncleared_buffer(page))
- /* For b-tree node pages, this function may be called twice
- or more because they might be split in a segment.
- This check assures that cleanup has been done for all
- buffers in a split btnode page. */
+ if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
+ /*
+ * For b-tree node pages, this function may be called twice
+ * or more because they might be split in a segment.
+ */
+ if (PageDirty(page)) {
+ /*
+ * For pages holding split b-tree node buffers, dirty
+ * flag on the buffers may be cleared discretely.
+ * In that case, the page is once redirtied for
+ * remaining buffers, and it must be cancelled if
+ * all the buffers get cleaned later.
+ */
+ lock_page(page);
+ if (nilfs_page_buffers_clean(page))
+ __nilfs_clear_page_dirty(page);
+ unlock_page(page);
+ }
return;
+ }
__nilfs_end_page_io(page, err);
}
@@ -1940,7 +1940,7 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
}
if (bh->b_page != fs_page) {
nilfs_end_page_io(fs_page, err);
- if (unlikely(fs_page == failed_page))
+ if (fs_page && fs_page == failed_page)
goto done;
fs_page = bh->b_page;
}
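
`err = err ?: res;` uses the GNU C conditional-with-omitted-middle-operand extension: it keeps the first nonzero error and otherwise takes the wait status. A compilable sketch (gcc/clang only):

#include <stdio.h>

/* Propagate the first failure: the submit error wins over the wait status. */
static int write_and_wait(int submit_err, int wait_err)
{
	int err = submit_err;
	int res = wait_err;

	err = err ?: res;	/* GNU extension for: err ? err : res */
	return err;
}

int main(void)
{
	printf("%d\n", write_and_wait(0, -5));		/* -5: wait failed */
	printf("%d\n", write_and_wait(-12, -5));	/* -12: submit error kept */
	return 0;
}
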
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 31dac7e..dffbb09 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,15 +1,5 @@
config FSNOTIFY
- bool "Filesystem notification backend"
- default y
- ---help---
- fsnotify is a backend for filesystem notification. fsnotify does
- not provide any userspace interface but does provide the basis
- needed for other notification schemes such as dnotify, inotify,
- and fanotify.
-
- Say Y here to enable fsnotify support.
-
- If unsure, say Y.
+ def_bool n
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig
index 904ff8d..f9c1ca1 100644
--- a/fs/notify/dnotify/Kconfig
+++ b/fs/notify/dnotify/Kconfig
@@ -1,6 +1,6 @@
config DNOTIFY
bool "Dnotify support"
- depends on FSNOTIFY
+ select FSNOTIFY
default y
help
Dnotify is a directory-based per-fd file change notification system
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ec2f7bd..037e878 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -159,7 +159,9 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const
if (!group->ops->should_send_event(group, to_tell, mask))
continue;
if (!event) {
- event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie);
+ event = fsnotify_create_event(to_tell, mask, data,
+ data_is, file_name, cookie,
+ GFP_KERNEL);
/* shit, we OOM'd and now we can't tell, maybe
* someday someone else will want to do something
* here */
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 5356884..3e56dbf 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -15,7 +15,7 @@ config INOTIFY
config INOTIFY_USER
bool "Inotify support for userspace"
- depends on FSNOTIFY
+ select FSNOTIFY
default y
---help---
Say Y here to enable inotify support for userspace, including the
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ff231ad..f30d9bb 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -57,7 +57,6 @@ int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
-static struct fsnotify_event *inotify_ignored_event;
/*
* When inotify registers a new group it increments this and uses that
@@ -296,12 +295,15 @@ static int inotify_fasync(int fd, struct file *file, int on)
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
+ struct user_struct *user = group->inotify_data.user;
fsnotify_clear_marks_by_group(group);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
+ atomic_dec(&user->inotify_devs);
+
return 0;
}
@@ -362,6 +364,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
}
+static void inotify_remove_from_idr(struct fsnotify_group *group,
+ struct inotify_inode_mark_entry *ientry)
+{
+ struct idr *idr;
+
+ spin_lock(&group->inotify_data.idr_lock);
+ idr = &group->inotify_data.idr;
+ idr_remove(idr, ientry->wd);
+ spin_unlock(&group->inotify_data.idr_lock);
+ ientry->wd = -1;
+}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
* internal reference held on the mark because it is in the idr.
@@ -370,13 +383,19 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
struct fsnotify_group *group)
{
struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_event *ignored_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
- struct idr *idr;
+
+ ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
+ FSNOTIFY_EVENT_NONE, NULL, 0,
+ GFP_NOFS);
+ if (!ignored_event)
+ return;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
- event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
+ event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
goto skip_send_ignore;
@@ -385,7 +404,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
fsn_event_priv->group = group;
event_priv->wd = ientry->wd;
- fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
+ fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
/* did the private data get added? */
if (list_empty(&fsn_event_priv->event_list))
@@ -393,14 +412,16 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
skip_send_ignore:
+ /* matches the reference taken when the event was created */
+ fsnotify_put_event(ignored_event);
+
/* remove this entry from the idr */
- spin_lock(&group->inotify_data.idr_lock);
- idr = &group->inotify_data.idr;
- idr_remove(idr, ientry->wd);
- spin_unlock(&group->inotify_data.idr_lock);
+ inotify_remove_from_idr(group, ientry);
/* removed from idr, drop that reference */
fsnotify_put_mark(entry);
+
+ atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
@@ -415,6 +436,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
{
struct fsnotify_mark_entry *entry = NULL;
struct inotify_inode_mark_entry *ientry;
+ struct inotify_inode_mark_entry *tmp_ientry;
int ret = 0;
int add = (arg & IN_MASK_ADD);
__u32 mask;
@@ -425,54 +447,66 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
if (unlikely(!mask))
return -EINVAL;
- ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
- if (unlikely(!ientry))
+ tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_ientry))
return -ENOMEM;
/* we set the mask at the end after attaching it */
- fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
- ientry->wd = 0;
+ fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+ tmp_ientry->wd = -1;
find_entry:
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (entry) {
- kmem_cache_free(inotify_inode_mark_cachep, ientry);
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
} else {
- if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
- ret = -ENOSPC;
+ ret = -ENOSPC;
+ if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
- }
-
- ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
- if (ret == -EEXIST)
- goto find_entry;
- else if (ret)
- goto out_err;
-
- entry = &ientry->fsn_entry;
retry:
ret = -ENOMEM;
if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
goto out_err;
spin_lock(&group->inotify_data.idr_lock);
- /* if entry is added to the idr we keep the reference obtained
- * through fsnotify_mark_add. remember to drop this reference
- * when entry is removed from idr */
- ret = idr_get_new_above(&group->inotify_data.idr, entry,
- ++group->inotify_data.last_wd,
- &ientry->wd);
+ ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+ group->inotify_data.last_wd,
+ &tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
if (ret == -EAGAIN)
goto retry;
goto out_err;
}
+
+ ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+ if (ret) {
+ inotify_remove_from_idr(group, tmp_ientry);
+ if (ret == -EEXIST)
+ goto find_entry;
+ goto out_err;
+ }
+
+ /* tmp_ientry has been added to the inode, so we are all set up.
+ * now we just need to make sure tmp_ientry doesn't get freed and
+ * we need to set up entry and ientry so the generic code can
+ * do its thing. */
+ ientry = tmp_ientry;
+ entry = &ientry->fsn_entry;
+ tmp_ientry = NULL;
+
atomic_inc(&group->inotify_data.user->inotify_watches);
+
+ /* update the idr hint */
+ group->inotify_data.last_wd = ientry->wd;
+
+ /* we put the mark on the idr, take a reference */
+ fsnotify_get_mark(entry);
}
+ ret = ientry->wd;
+
spin_lock(&entry->lock);
old_mask = entry->mask;
@@ -503,14 +537,19 @@ retry:
fsnotify_recalc_group_mask(group);
}
- return ientry->wd;
+ /* this either matches fsnotify_find_mark_entry, or init_mark_entry
+ * depending on which path we took... */
+ fsnotify_put_mark(entry);
out_err:
- /* see this isn't supposed to happen, just kill the watch */
- if (entry) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ /* could be an error, could be that we found an existing mark */
+ if (tmp_ientry) {
+ /* on the idr but didn't make it on the inode */
+ if (tmp_ientry->wd != -1)
+ inotify_remove_from_idr(group, tmp_ientry);
+ kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
}
+
return ret;
}
@@ -718,9 +757,6 @@ static int __init inotify_user_setup(void)
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
- inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
- if (!inotify_ignored_event)
- panic("unable to allocate the inotify ignored event\n");
inotify_max_queued_events = 16384;
inotify_max_user_instances = 128;
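
The reordering above installs the new mark in the idr first and only then attaches it to the inode, so the error path can unwind precisely: remove from the idr if (and only if) a wd was assigned, then free. A userspace sketch of that install/undo discipline, with a toy table standing in for the idr:

#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 8

struct mark { int wd; };

static struct mark *table[TABLE_SIZE];

static int table_add(struct mark *m)		/* stands in for idr_get_new_above() */
{
	for (int i = 0; i < TABLE_SIZE; i++)
		if (!table[i]) {
			table[i] = m;
			m->wd = i;
			return 0;
		}
	return -1;
}

static void table_remove(struct mark *m)	/* stands in for inotify_remove_from_idr() */
{
	table[m->wd] = NULL;
	m->wd = -1;
}

static int attach_to_inode(struct mark *m)	/* may fail, e.g. -EEXIST */
{
	(void)m;
	return -17;
}

int main(void)
{
	struct mark *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	m->wd = -1;				/* sentinel: not in the table yet */
	if (table_add(m) != 0 || attach_to_inode(m) != 0) {
		if (m->wd != -1)		/* in the table but not on the inode */
			table_remove(m);
		free(m);
		puts("unwound cleanly");
	}
	return 0;
}
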
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 959b73e..5213685 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -136,18 +136,24 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
{
if ((old->mask == new->mask) &&
(old->to_tell == new->to_tell) &&
- (old->data_type == new->data_type)) {
+ (old->data_type == new->data_type) &&
+ (old->name_len == new->name_len)) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_INODE):
- if (old->inode == new->inode)
+ /* remember, after old was put on the wait_q we aren't
+ * allowed to look at the inode any more; the only thing
+ * left to check is whether the file_name is the same */
+ if (old->name_len &&
+ !strcmp(old->file_name, new->file_name))
return true;
break;
case (FSNOTIFY_EVENT_PATH):
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
+ break;
case (FSNOTIFY_EVENT_NONE):
- return true;
+ return false;
};
}
return false;
@@ -339,18 +345,19 @@ static void initialize_event(struct fsnotify_event *event)
* @name the filename, if available
*/
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
- int data_type, const char *name, u32 cookie)
+ int data_type, const char *name, u32 cookie,
+ gfp_t gfp)
{
struct fsnotify_event *event;
- event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+ event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
if (!event)
return NULL;
initialize_event(event);
if (name) {
- event->file_name = kstrdup(name, GFP_KERNEL);
+ event->file_name = kstrdup(name, gfp);
if (!event->file_name) {
kmem_cache_free(fsnotify_event_cachep, event);
return NULL;
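
With the inode no longer safe to dereference once the old event is queued, the merge test above compares name lengths up front and then the names themselves, and FSNOTIFY_EVENT_NONE events stop merging (the missing break had also let PATH comparisons fall through into that case). A reduced sketch of the corrected comparison, with the mnt/dentry fields elided:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum data_type { EV_NONE, EV_INODE, EV_PATH };

struct event {
	unsigned mask;
	enum data_type data_type;
	size_t name_len;
	const char *file_name;
};

static bool event_compare(const struct event *old, const struct event *new)
{
	if (old->mask != new->mask ||
	    old->data_type != new->data_type ||
	    old->name_len != new->name_len)
		return false;

	switch (old->data_type) {
	case EV_INODE:
		/* the old inode pointer may be stale; compare names only */
		return old->name_len && !strcmp(old->file_name, new->file_name);
	case EV_PATH:
		/* (mnt/dentry comparison elided in this sketch) */
		return false;
	case EV_NONE:
		return false;	/* never merge bare events any more */
	}
	return false;
}

int main(void)
{
	struct event a = { 1, EV_INODE, 3, "foo" };
	struct event b = { 1, EV_INODE, 3, "foo" };

	printf("%d\n", event_compare(&a, &b));	/* 1 */
	return 0;
}
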
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 9edcde4..f9a3e89 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1914,7 +1914,8 @@ static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
* immediately to their right.
*/
left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
- if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) {
+ if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
+ BUG_ON(right_child_el->l_tree_depth);
BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
}
@@ -2476,15 +2477,37 @@ out_ret_path:
return ret;
}
-static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
- struct ocfs2_path *path)
+static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
+ int subtree_index, struct ocfs2_path *path)
{
- int i, idx;
+ int i, idx, ret;
struct ocfs2_extent_rec *rec;
struct ocfs2_extent_list *el;
struct ocfs2_extent_block *eb;
u32 range;
+ /*
+ * In normal tree rotation process, we will never touch the
+ * tree branch above subtree_index and ocfs2_extend_rotate_transaction
+ * doesn't reserve the credits for them either.
+ *
+ * But we do have a special case here which will update the rightmost
+ * records for all the bh in the path.
+ * So we have to allocate extra credits and access them.
+ */
+ ret = ocfs2_extend_trans(handle,
+ handle->h_buffer_credits + subtree_index);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_path(inode, handle, path);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
/* Path should always be rightmost. */
eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
BUG_ON(eb->h_next_leaf_blk != 0ULL);
@@ -2505,6 +2528,8 @@ static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
ocfs2_journal_dirty(handle, path->p_node[i].bh);
}
+out:
+ return ret;
}
static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
@@ -2717,7 +2742,12 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
if (del_right_subtree) {
ocfs2_unlink_subtree(inode, handle, left_path, right_path,
subtree_index, dealloc);
- ocfs2_update_edge_lengths(inode, handle, left_path);
+ ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
+ left_path);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
@@ -3034,7 +3064,12 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
ocfs2_unlink_subtree(inode, handle, left_path, path,
subtree_index, dealloc);
- ocfs2_update_edge_lengths(inode, handle, left_path);
+ ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
+ left_path);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index b2c52b3..b401654 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -193,6 +193,7 @@ static int ocfs2_get_block(struct inode *inode, sector_t iblock,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
dump_stack();
+ goto bail;
}
past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
@@ -894,18 +895,17 @@ struct ocfs2_write_cluster_desc {
*/
unsigned c_new;
unsigned c_unwritten;
+ unsigned c_needs_zero;
};
-static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
-{
- return d->c_new || d->c_unwritten;
-}
-
struct ocfs2_write_ctxt {
/* Logical cluster position / len of write */
u32 w_cpos;
u32 w_clen;
+ /* First cluster allocated in a nonsparse extend */
+ u32 w_first_new_cpos;
+
struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
/*
@@ -983,6 +983,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
return -ENOMEM;
wc->w_cpos = pos >> osb->s_clustersize_bits;
+ wc->w_first_new_cpos = UINT_MAX;
cend = (pos + len - 1) >> osb->s_clustersize_bits;
wc->w_clen = cend - wc->w_cpos + 1;
get_bh(di_bh);
@@ -1217,20 +1218,18 @@ out:
*/
static int ocfs2_write_cluster(struct address_space *mapping,
u32 phys, unsigned int unwritten,
+ unsigned int should_zero,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_write_ctxt *wc, u32 cpos,
loff_t user_pos, unsigned user_len)
{
- int ret, i, new, should_zero = 0;
+ int ret, i, new;
u64 v_blkno, p_blkno;
struct inode *inode = mapping->host;
struct ocfs2_extent_tree et;
new = phys == 0 ? 1 : 0;
- if (new || unwritten)
- should_zero = 1;
-
if (new) {
u32 tmp_pos;
@@ -1301,7 +1300,7 @@ static int ocfs2_write_cluster(struct address_space *mapping,
if (tmpret) {
mlog_errno(tmpret);
if (ret == 0)
- tmpret = ret;
+ ret = tmpret;
}
}
@@ -1341,7 +1340,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
local_len = osb->s_clustersize - cluster_off;
ret = ocfs2_write_cluster(mapping, desc->c_phys,
- desc->c_unwritten, data_ac, meta_ac,
+ desc->c_unwritten,
+ desc->c_needs_zero,
+ data_ac, meta_ac,
wc, desc->c_cpos, pos, local_len);
if (ret) {
mlog_errno(ret);
@@ -1391,14 +1392,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
* newly allocated cluster.
*/
desc = &wc->w_desc[0];
- if (ocfs2_should_zero_cluster(desc))
+ if (desc->c_needs_zero)
ocfs2_figure_cluster_boundaries(osb,
desc->c_cpos,
&wc->w_target_from,
NULL);
desc = &wc->w_desc[wc->w_clen - 1];
- if (ocfs2_should_zero_cluster(desc))
+ if (desc->c_needs_zero)
ocfs2_figure_cluster_boundaries(osb,
desc->c_cpos,
NULL,
@@ -1466,13 +1467,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
phys++;
}
+ /*
+ * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
+ * file that got extended. w_first_new_cpos tells us
+ * where the newly allocated clusters are so we can
+ * zero them.
+ */
+ if (desc->c_cpos >= wc->w_first_new_cpos) {
+ BUG_ON(phys == 0);
+ desc->c_needs_zero = 1;
+ }
+
desc->c_phys = phys;
if (phys == 0) {
desc->c_new = 1;
+ desc->c_needs_zero = 1;
*clusters_to_alloc = *clusters_to_alloc + 1;
}
- if (ext_flags & OCFS2_EXT_UNWRITTEN)
+
+ if (ext_flags & OCFS2_EXT_UNWRITTEN) {
desc->c_unwritten = 1;
+ desc->c_needs_zero = 1;
+ }
num_clusters--;
}
@@ -1632,10 +1648,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
if (newsize <= i_size_read(inode))
return 0;
- ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
+ ret = ocfs2_extend_no_holes(inode, newsize, pos);
if (ret)
mlog_errno(ret);
+ wc->w_first_new_cpos =
+ ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
+
return ret;
}
@@ -1644,7 +1663,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
struct page **pagep, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page)
{
- int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
+ int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
unsigned int clusters_to_alloc, extents_to_split;
struct ocfs2_write_ctxt *wc;
struct inode *inode = mapping->host;
@@ -1722,8 +1741,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
}
- ocfs2_set_target_boundaries(osb, wc, pos, len,
- clusters_to_alloc + extents_to_split);
+ /*
+ * We have to zero sparse allocated clusters, unwritten extent clusters,
+ * and non-sparse clusters we just extended. For non-sparse writes,
+ * we know zeros will only be needed in the first and/or last cluster.
+ */
+ if (clusters_to_alloc || extents_to_split ||
+ wc->w_desc[0].c_needs_zero ||
+ wc->w_desc[wc->w_clen - 1].c_needs_zero)
+ cluster_of_pages = 1;
+ else
+ cluster_of_pages = 0;
+
+ ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
@@ -1756,8 +1786,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
* extent.
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
- clusters_to_alloc + extents_to_split,
- mmap_page);
+ cluster_of_pages, mmap_page);
if (ret) {
mlog_errno(ret);
goto out_quota;
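
The descriptor flag introduced above replaces the old "new or unwritten" predicate with three zeroing triggers. A sketch of the decision, using the same fields the hunks touch (w_first_new_cpos is UINT_MAX when no nonsparse extension happened):

#include <limits.h>
#include <stdio.h>

struct desc {
	unsigned cpos;
	unsigned phys;		/* 0 means newly allocated */
	int unwritten;
};

/* A cluster must be zeroed if it is brand new, was an unwritten
 * extent, or lies in the region a nonsparse write just extended. */
static int needs_zero(const struct desc *d, unsigned first_new_cpos)
{
	return d->phys == 0 || d->unwritten || d->cpos >= first_new_cpos;
}

int main(void)
{
	struct desc d = { .cpos = 12, .phys = 42, .unwritten = 0 };

	printf("%d\n", needs_zero(&d, UINT_MAX));	/* 0: plain overwrite */
	printf("%d\n", needs_zero(&d, 10));		/* 1: nonsparse extension */
	return 0;
}
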
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index b574431..2f28b7d 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -310,22 +310,19 @@ out_attach:
return ret;
}
-static DEFINE_SPINLOCK(dentry_list_lock);
+DEFINE_SPINLOCK(dentry_list_lock);
/* We limit the number of dentry locks to drop in one go. We have
* this limit so that we don't starve other users of ocfs2_wq. */
#define DL_INODE_DROP_COUNT 64
/* Drop inode references from dentry locks */
-void ocfs2_drop_dl_inodes(struct work_struct *work)
+static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
{
- struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
- dentry_lock_work);
struct ocfs2_dentry_lock *dl;
- int drop_count = DL_INODE_DROP_COUNT;
spin_lock(&dentry_list_lock);
- while (osb->dentry_lock_list && drop_count--) {
+ while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
dl = osb->dentry_lock_list;
osb->dentry_lock_list = dl->dl_next;
spin_unlock(&dentry_list_lock);
@@ -333,11 +330,32 @@ void ocfs2_drop_dl_inodes(struct work_struct *work)
kfree(dl);
spin_lock(&dentry_list_lock);
}
- if (osb->dentry_lock_list)
+ spin_unlock(&dentry_list_lock);
+}
+
+void ocfs2_drop_dl_inodes(struct work_struct *work)
+{
+ struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
+ dentry_lock_work);
+
+ __ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
+ /*
+ * Don't queue dropping if umount is in progress. We flush the
+ * list in ocfs2_dismount_volume
+ */
+ spin_lock(&dentry_list_lock);
+ if (osb->dentry_lock_list &&
+ !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
queue_work(ocfs2_wq, &osb->dentry_lock_work);
spin_unlock(&dentry_list_lock);
}
+/* Drain the whole dentry-lock list */
+void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
+{
+ __ocfs2_drop_dl_inodes(osb, -1);
+}
+
/*
* ocfs2_dentry_iput() and friends.
*
@@ -368,7 +386,8 @@ static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
/* We leave dropping of inode reference to ocfs2_wq as that can
* possibly lead to inode deletion which gets tricky */
spin_lock(&dentry_list_lock);
- if (!osb->dentry_lock_list)
+ if (!osb->dentry_lock_list &&
+ !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
queue_work(ocfs2_wq, &osb->dentry_lock_work);
dl->dl_next = osb->dentry_lock_list;
osb->dentry_lock_list = dl;
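
__ocfs2_drop_dl_inodes() above treats a negative drop_count as "no limit", which is what lets ocfs2_drop_all_dl_inodes() flush everything at dismount while the workqueue path keeps its batch of 64. The idiom in isolation:

#include <stdio.h>

struct node { struct node *next; };

/* drop_count < 0 drains everything; otherwise at most drop_count nodes. */
static int drain(struct node **list, int drop_count)
{
	int dropped = 0;

	while (*list && (drop_count < 0 || drop_count--)) {
		*list = (*list)->next;
		dropped++;
	}
	return dropped;
}

int main(void)
{
	struct node n3 = { 0 }, n2 = { &n3 }, n1 = { &n2 };
	struct node *list = &n1;

	printf("%d\n", drain(&list, 2));	/* 2: batched */
	printf("%d\n", drain(&list, -1));	/* 1: drain the rest */
	return 0;
}
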
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index faa12e7..f5dd178 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -49,10 +49,13 @@ struct ocfs2_dentry_lock {
int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode,
u64 parent_blkno);
+extern spinlock_t dentry_list_lock;
+
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl);
void ocfs2_drop_dl_inodes(struct work_struct *work);
+void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb);
struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno,
int skip_unhashed);
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index d07ddbe..81eff8e 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -103,7 +103,6 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
lock->ast_pending, lock->ml.type);
BUG();
}
- BUG_ON(!list_empty(&lock->ast_list));
if (lock->ast_pending)
mlog(0, "lock has an ast getting flushed right now\n");
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index bcb9260..43e6e32 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1118,7 +1118,7 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
dlm->name, res->lockname.len, res->lockname.name,
- orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery",
+ orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
send_to);
/* send it */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 62442e4..aa501d3 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1851,6 +1851,7 @@ relock:
if (ret)
goto out_dio;
+ count = ocount;
ret = generic_write_checks(file, ppos, &count,
S_ISBLK(inode->i_mode));
if (ret)
@@ -1918,8 +1919,10 @@ out_sems:
mutex_unlock(&inode->i_mutex);
+ if (written)
+ ret = written;
mlog_exit(ret);
- return written ? written : ret;
+ return ret;
}
static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
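
The return-value rework above folds `written ? written : ret` into ret before mlog_exit(), so the traced value matches what the caller sees: a short write still reports the bytes written, and only a write that produced nothing reports the error. In isolation:

#include <stdio.h>

/* Partial success wins: report bytes written if there were any,
 * otherwise the error code. */
static long finish_write(long written, int err)
{
	long ret = err;

	if (written)
		ret = written;
	/* mlog_exit(ret) now logs the same value we return */
	return ret;
}

int main(void)
{
	printf("%ld\n", finish_write(4096, -28));	/* 4096: short write */
	printf("%ld\n", finish_write(0, -28));		/* -28 (-ENOSPC) */
	return 0;
}
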
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 9fcd36d..467b413 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,7 +7,6 @@
#include <linux/fs.h>
#include <linux/mount.h>
-#include <linux/smp_lock.h>
#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index f033760..c48b93a 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1954,10 +1954,16 @@ void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
os->os_osb = osb;
os->os_count = 0;
os->os_seqno = 0;
- os->os_scantime = CURRENT_TIME;
mutex_init(&os->os_lock);
INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
+}
+void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
+{
+ struct ocfs2_orphan_scan *os;
+
+ os = &osb->osb_orphan_scan;
+ os->os_scantime = CURRENT_TIME;
if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
else {
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 5432c7f..2c3222a 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -145,6 +145,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
/* Exported only for the journal struct init code in super.c. Do not call. */
void ocfs2_orphan_scan_init(struct ocfs2_super *osb);
+void ocfs2_orphan_scan_start(struct ocfs2_super *osb);
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
@@ -329,20 +330,27 @@ int ocfs2_journal_dirty(handle_t *handle,
/* extended attribute block update */
#define OCFS2_XATTR_BLOCK_UPDATE_CREDITS 1
+/* Update of a single quota block */
+#define OCFS2_QUOTA_BLOCK_UPDATE_CREDITS 1
+
/* global quotafile inode update, data block */
-#define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1)
+#define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + \
+ OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
+#define OCFS2_LOCAL_QINFO_WRITE_CREDITS OCFS2_QUOTA_BLOCK_UPDATE_CREDITS
/*
* The two writes below can accidentally see global info dirty due
* to set_info() quotactl so make them prepared for the writes.
*/
/* quota data block, global info */
/* Write to local quota file */
-#define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + 1)
+#define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \
+ OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
/* global quota data block, local quota data block, global quota inode,
* global quota info */
-#define OCFS2_QSYNC_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 3)
+#define OCFS2_QSYNC_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \
+ 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
static inline int ocfs2_quota_trans_credits(struct super_block *sb)
{
@@ -355,11 +363,6 @@ static inline int ocfs2_quota_trans_credits(struct super_block *sb)
return credits;
}
-/* Number of credits needed for removing quota structure from file */
-int ocfs2_calc_qdel_credits(struct super_block *sb, int type);
-/* Number of credits needed for initialization of new quota structure */
-int ocfs2_calc_qinit_credits(struct super_block *sb, int type);
-
/* group extend. inode update and last group update. */
#define OCFS2_GROUP_EXTEND_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1)
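
The reworked macros above can be spot-checked with ordinary arithmetic. A sketch, assuming a placeholder value for OCFS2_INODE_UPDATE_CREDITS (its real value lives in the full header, not in this hunk):

#include <stdio.h>

/* Placeholder only -- the real value is defined elsewhere in journal.h. */
#define OCFS2_INODE_UPDATE_CREDITS		2
#define OCFS2_QUOTA_BLOCK_UPDATE_CREDITS	1

#define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + \
				   OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
#define OCFS2_QWRITE_CREDITS	  (OCFS2_QINFO_WRITE_CREDITS + \
				   OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
#define OCFS2_QSYNC_CREDITS	  (OCFS2_QINFO_WRITE_CREDITS + \
				   2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)

int main(void)
{
	printf("qinfo write: %d\n", OCFS2_QINFO_WRITE_CREDITS);	/* 3 */
	printf("qwrite:      %d\n", OCFS2_QWRITE_CREDITS);	/* 4 */
	printf("qsync:       %d\n", OCFS2_QSYNC_CREDITS);	/* 5 */
	return 0;
}
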
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index c9345eb..39e1d5a 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -224,10 +224,12 @@ enum ocfs2_mount_options
OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */
};
-#define OCFS2_OSB_SOFT_RO 0x0001
-#define OCFS2_OSB_HARD_RO 0x0002
-#define OCFS2_OSB_ERROR_FS 0x0004
-#define OCFS2_DEFAULT_ATIME_QUANTUM 60
+#define OCFS2_OSB_SOFT_RO 0x0001
+#define OCFS2_OSB_HARD_RO 0x0002
+#define OCFS2_OSB_ERROR_FS 0x0004
+#define OCFS2_OSB_DROP_DENTRY_LOCK_IMMED 0x0008
+
+#define OCFS2_DEFAULT_ATIME_QUANTUM 60
struct ocfs2_journal;
struct ocfs2_slot_info;
@@ -490,6 +492,18 @@ static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb,
spin_unlock(&osb->osb_lock);
}
+
+static inline unsigned long ocfs2_test_osb_flag(struct ocfs2_super *osb,
+ unsigned long flag)
+{
+ unsigned long ret;
+
+ spin_lock(&osb->osb_lock);
+ ret = osb->osb_flags & flag;
+ spin_unlock(&osb->osb_lock);
+ return ret;
+}
+
static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb,
int hard)
{
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 7365e2e..3fb96fcd 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -50,7 +50,6 @@ struct ocfs2_mem_dqinfo {
unsigned int dqi_chunks; /* Number of chunks in local quota file */
unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */
unsigned int dqi_syncms; /* How often should we sync with other nodes */
- unsigned int dqi_syncjiff; /* Precomputed dqi_syncms in jiffies */
struct list_head dqi_chunk; /* List of chunks */
struct inode *dqi_gqinode; /* Global quota file inode */
struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index edfa60c..bf7742d 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -69,6 +69,7 @@ static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
d->dqb_btime = cpu_to_le64(m->dqb_btime);
d->dqb_itime = cpu_to_le64(m->dqb_itime);
+ d->dqb_pad1 = d->dqb_pad2 = 0;
}
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
@@ -211,14 +212,13 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
if (gqinode->i_size < off + len) {
- down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
- err = ocfs2_extend_no_holes(gqinode, off + len, off);
- up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
- if (err < 0)
- goto out;
+ loff_t rounded_end =
+ ocfs2_align_bytes_to_blocks(sb, off + len);
+
+ /* Space is already allocated in ocfs2_global_read_dquot() */
err = ocfs2_simple_size_update(gqinode,
oinfo->dqi_gqi_bh,
- off + len);
+ rounded_end);
if (err < 0)
goto out;
new = 1;
@@ -234,7 +234,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
}
if (err) {
mlog_errno(err);
- return err;
+ goto out;
}
lock_buffer(bh);
if (new)
@@ -342,7 +342,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
- oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
@@ -352,7 +351,7 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
- oinfo->dqi_syncjiff);
+ msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
mlog_exit(status);
@@ -402,13 +401,36 @@ int ocfs2_global_write_info(struct super_block *sb, int type)
return err;
}
+static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
+{
+ struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
+
+ /*
+ * We may need to allocate tree blocks and a leaf block but not the
+ * root block
+ */
+ return oinfo->dqi_gi.dqi_qtree_depth;
+}
+
+static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
+{
+ /* We modify all the allocated blocks, tree root, and info block */
+ return (ocfs2_global_qinit_alloc(sb, type) + 2) *
+ OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
+}
+
/* Read in information from global quota file and acquire a reference to it.
* dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
int err, err2, ex = 0;
- struct ocfs2_mem_dqinfo *info =
- sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+ struct super_block *sb = dquot->dq_sb;
+ int type = dquot->dq_type;
+ struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+ struct inode *gqinode = info->dqi_gqinode;
+ int need_alloc = ocfs2_global_qinit_alloc(sb, type);
+ handle_t *handle = NULL;
err = ocfs2_qinfo_lock(info, 0);
if (err < 0)
@@ -419,14 +441,33 @@ int ocfs2_global_read_dquot(struct dquot *dquot)
OCFS2_DQUOT(dquot)->dq_use_count++;
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
+ ocfs2_qinfo_unlock(info, 0);
+
if (!dquot->dq_off) { /* No real quota entry? */
- /* Upgrade to exclusive lock for allocation */
- ocfs2_qinfo_unlock(info, 0);
- err = ocfs2_qinfo_lock(info, 1);
- if (err < 0)
- goto out_qlock;
ex = 1;
+ /*
+ * Add blocks to the quota file before we start a transaction, since
+ * allocator locks rank above a transaction start
+ */
+ WARN_ON(journal_current_handle());
+ down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
+ err = ocfs2_extend_no_holes(gqinode,
+ gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
+ gqinode->i_size);
+ up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
+ if (err < 0)
+ goto out;
}
+
+ handle = ocfs2_start_trans(osb,
+ ocfs2_calc_global_qinit_credits(sb, type));
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out;
+ }
+ err = ocfs2_qinfo_lock(info, ex);
+ if (err < 0)
+ goto out_trans;
err = qtree_write_dquot(&info->dqi_gi, dquot);
if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
@@ -438,6 +479,9 @@ out_qlock:
ocfs2_qinfo_unlock(info, 1);
else
ocfs2_qinfo_unlock(info, 0);
+out_trans:
+ if (handle)
+ ocfs2_commit_trans(osb, handle);
out:
if (err < 0)
mlog_errno(err);
@@ -607,7 +651,7 @@ static void qsync_work_fn(struct work_struct *work)
dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
- oinfo->dqi_syncjiff);
+ msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
@@ -635,20 +679,18 @@ out:
return status;
}
-int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
+static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
- struct ocfs2_mem_dqinfo *oinfo;
- int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
-
- if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
- return 0;
-
- oinfo = sb_dqinfo(sb, type)->dqi_priv;
- /* We modify tree, leaf block, global info, local chunk header,
- * global and local inode */
- return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
- 2 * OCFS2_INODE_UPDATE_CREDITS;
+ struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
+ /*
+ * We modify tree, leaf block, global info, local chunk header,
+ * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
+ * accounts for inode update
+ */
+ return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
+ OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
+ OCFS2_QINFO_WRITE_CREDITS +
+ OCFS2_INODE_UPDATE_CREDITS;
}
static int ocfs2_release_dquot(struct dquot *dquot)
@@ -680,33 +722,10 @@ out:
return status;
}
-int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
-{
- struct ocfs2_mem_dqinfo *oinfo;
- int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
- struct ocfs2_dinode *lfe, *gfe;
-
- if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
- return 0;
-
- oinfo = sb_dqinfo(sb, type)->dqi_priv;
- gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
- lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
- /* We can extend local file + global file. In local file we
- * can modify info, chunk header block and dquot block. In
- * global file we can modify info, tree and leaf block */
- return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
- ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
- 3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
-}
-
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
- handle_t *handle;
struct ocfs2_mem_dqinfo *oinfo =
sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
- struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
@@ -715,16 +734,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
- handle = ocfs2_start_trans(osb,
- ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
- if (IS_ERR(handle)) {
- status = PTR_ERR(handle);
- mlog_errno(status);
- goto out_ilock;
- }
status = dquot_acquire(dquot);
- ocfs2_commit_trans(osb, handle);
-out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
mlog_exit(status);
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 5a460fa..bdb09cb 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -20,6 +20,7 @@
#include "sysfile.h"
#include "dlmglue.h"
#include "quota.h"
+#include "uptodate.h"
/* Number of local quota structures per block */
static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -100,7 +101,8 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
handle_t *handle;
int status;
- handle = ocfs2_start_trans(OCFS2_SB(sb), 1);
+ handle = ocfs2_start_trans(OCFS2_SB(sb),
+ OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
@@ -610,7 +612,8 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
goto out_bh;
/* Mark quota file as clean if we are recovering quota file of
* some other node. */
- handle = ocfs2_start_trans(osb, 1);
+ handle = ocfs2_start_trans(osb,
+ OCFS2_LOCAL_QINFO_WRITE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
@@ -940,7 +943,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
struct ocfs2_local_disk_chunk *dchunk;
int status;
handle_t *handle;
- struct buffer_head *bh = NULL;
+ struct buffer_head *bh = NULL, *dbh = NULL;
u64 p_blkno;
/* We are protected by dqio_sem so no locking needed */
@@ -964,32 +967,35 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
mlog_errno(status);
goto out;
}
+ /* Local quota info and two new blocks we initialize */
+ handle = ocfs2_start_trans(OCFS2_SB(sb),
+ OCFS2_LOCAL_QINFO_WRITE_CREDITS +
+ 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ mlog_errno(status);
+ goto out;
+ }
+ /* Initialize chunk header */
down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
&p_blkno, NULL, NULL);
up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
if (status < 0) {
mlog_errno(status);
- goto out;
+ goto out_trans;
}
bh = sb_getblk(sb, p_blkno);
if (!bh) {
status = -ENOMEM;
mlog_errno(status);
- goto out;
+ goto out_trans;
}
dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
-
- handle = ocfs2_start_trans(OCFS2_SB(sb), 2);
- if (IS_ERR(handle)) {
- status = PTR_ERR(handle);
- mlog_errno(status);
- goto out;
- }
-
+ ocfs2_set_new_buffer_uptodate(lqinode, bh);
status = ocfs2_journal_access_dq(handle, lqinode, bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto out_trans;
@@ -999,7 +1005,6 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
memset(dchunk->dqc_bitmap, 0,
sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) -
OCFS2_QBLK_RESERVED_SPACE);
- set_buffer_uptodate(bh);
unlock_buffer(bh);
status = ocfs2_journal_dirty(handle, bh);
if (status < 0) {
@@ -1007,6 +1012,38 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
goto out_trans;
}
+ /* Initialize new block with structures */
+ down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
+ status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1,
+ &p_blkno, NULL, NULL);
+ up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_trans;
+ }
+ dbh = sb_getblk(sb, p_blkno);
+ if (!dbh) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto out_trans;
+ }
+ ocfs2_set_new_buffer_uptodate(lqinode, dbh);
+ status = ocfs2_journal_access_dq(handle, lqinode, dbh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_trans;
+ }
+ lock_buffer(dbh);
+ memset(dbh->b_data, 0, sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE);
+ unlock_buffer(dbh);
+ status = ocfs2_journal_dirty(handle, dbh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_trans;
+ }
+
+ /* Update local quotafile info */
oinfo->dqi_blocks += 2;
oinfo->dqi_chunks++;
status = ocfs2_local_write_info(sb, type);
@@ -1031,6 +1068,7 @@ out_trans:
ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
brelse(bh);
+ brelse(dbh);
kmem_cache_free(ocfs2_qf_chunk_cachep, chunk);
return ERR_PTR(status);
}
@@ -1048,6 +1086,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
struct ocfs2_local_disk_chunk *dchunk;
int epb = ol_quota_entries_per_block(sb);
unsigned int chunk_blocks;
+ struct buffer_head *bh;
+ u64 p_blkno;
int status;
handle_t *handle;
@@ -1075,12 +1115,49 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
mlog_errno(status);
goto out;
}
- handle = ocfs2_start_trans(OCFS2_SB(sb), 2);
+
+ /* Get buffer from the just added block */
+ down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
+ status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
+ &p_blkno, NULL, NULL);
+ up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out;
+ }
+ bh = sb_getblk(sb, p_blkno);
+ if (!bh) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto out;
+ }
+ ocfs2_set_new_buffer_uptodate(lqinode, bh);
+
+ /* Local quota info, chunk header and the new block we initialize */
+ handle = ocfs2_start_trans(OCFS2_SB(sb),
+ OCFS2_LOCAL_QINFO_WRITE_CREDITS +
+ 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out;
}
+ /* Zero created block */
+ status = ocfs2_journal_access_dq(handle, lqinode, bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_trans;
+ }
+ lock_buffer(bh);
+ memset(bh->b_data, 0, sb->s_blocksize);
+ unlock_buffer(bh);
+ status = ocfs2_journal_dirty(handle, bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_trans;
+ }
+ /* Update chunk header */
status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
@@ -1097,6 +1174,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
mlog_errno(status);
goto out_trans;
}
+ /* Update file header */
oinfo->dqi_blocks++;
status = ocfs2_local_write_info(sb, type);
if (status < 0) {
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 3f66137..e49c410 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -17,6 +17,7 @@
* General Public License for more details.
*/
+#include <linux/kernel.h>
#include <linux/crc32.h>
#include <linux/module.h>
@@ -153,7 +154,7 @@ static int status_map[] = {
static int dlm_status_to_errno(enum dlm_status status)
{
- BUG_ON(status > (sizeof(status_map) / sizeof(status_map[0])));
+ BUG_ON(status < 0 || status >= ARRAY_SIZE(status_map));
return status_map[status];
}
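
The fix above tightens the bounds check in two ways: it rejects negative values and uses >= with ARRAY_SIZE() rather than > with a hand-rolled sizeof division (the old test was off by one and missed negatives). The same check in plain C:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int status_map[] = { 0, -1, -22, -16 };	/* illustrative values */

static int status_to_errno(int status)
{
	/* reject both negative statuses and one-past-the-end */
	if (status < 0 || (size_t)status >= ARRAY_SIZE(status_map)) {
		fprintf(stderr, "bad status %d\n", status);
		return -22;	/* -EINVAL, as a sketch; the kernel BUG()s here */
	}
	return status_map[status];
}

int main(void)
{
	printf("%d\n", status_to_errno(2));	/* -22 */
	printf("%d\n", status_to_errno(4));	/* out of range */
	return 0;
}
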
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7efb349..b0ee0fd 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -777,6 +777,7 @@ static int ocfs2_sb_probe(struct super_block *sb,
}
di = (struct ocfs2_dinode *) (*bh)->b_data;
memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
+ spin_lock_init(&stats->b_lock);
status = ocfs2_verify_volume(di, *bh, blksize, stats);
if (status >= 0)
goto bail;
@@ -1182,7 +1183,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
wake_up(&osb->osb_mount_event);
/* Start this when the mount is almost sure of being successful */
- ocfs2_orphan_scan_init(osb);
+ ocfs2_orphan_scan_start(osb);
mlog_exit(status);
return status;
@@ -1213,14 +1214,27 @@ static int ocfs2_get_sb(struct file_system_type *fs_type,
mnt);
}
+static void ocfs2_kill_sb(struct super_block *sb)
+{
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ /* Prevent further queueing of inode drop events */
+ spin_lock(&dentry_list_lock);
+ ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED);
+ spin_unlock(&dentry_list_lock);
+ /* Wait for work to finish and/or remove it */
+ cancel_work_sync(&osb->dentry_lock_work);
+
+ kill_block_super(sb);
+}
+
static struct file_system_type ocfs2_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2",
.get_sb = ocfs2_get_sb, /* is this called when we mount
* the fs? */
- .kill_sb = kill_block_super, /* set to the generic one
- * right now, but do we
- * need to change that? */
+ .kill_sb = ocfs2_kill_sb,
+
.fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
.next = NULL
};
@@ -1819,6 +1833,12 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
debugfs_remove(osb->osb_ctxt);
+ /*
+ * Flush inode dropping work queue so that deletes are
+ * performed while the filesystem is still working
+ */
+ ocfs2_drop_all_dl_inodes(osb);
+
/* Orphan scan should be stopped as early as possible */
ocfs2_orphan_scan_stop(osb);
@@ -1981,6 +2001,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u",
MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+ ocfs2_orphan_scan_init(osb);
+
status = ocfs2_recovery_init(osb);
if (status) {
mlog(ML_ERROR, "Unable to initialize recovery state\n");
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index ba320e2..d1a27cd 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1052,7 +1052,8 @@ static int ocfs2_xattr_block_get(struct inode *inode,
struct ocfs2_xattr_block *xb;
struct ocfs2_xattr_value_root *xv;
size_t size;
- int ret = -ENODATA, name_offset, name_len, block_off, i;
+ int ret = -ENODATA, name_offset, name_len, i;
+ int uninitialized_var(block_off);
xs->bucket = ocfs2_xattr_bucket_new(inode);
if (!xs->bucket) {
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 1a9c787..ea4e6cb 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -436,7 +436,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
- if (!dev_get_uevent_suppress(pdev))
+ if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
return p;
diff --git a/fs/pipe.c b/fs/pipe.c
index f7dd21a..52c4151 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -68,8 +68,8 @@ void pipe_double_lock(struct pipe_inode_info *pipe1,
pipe_lock_nested(pipe1, I_MUTEX_PARENT);
pipe_lock_nested(pipe2, I_MUTEX_CHILD);
} else {
- pipe_lock_nested(pipe2, I_MUTEX_CHILD);
- pipe_lock_nested(pipe1, I_MUTEX_PARENT);
+ pipe_lock_nested(pipe2, I_MUTEX_PARENT);
+ pipe_lock_nested(pipe1, I_MUTEX_CHILD);
}
}
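
Both branches of pipe_double_lock() already acquire the two locks in a fixed order; the fix makes the lockdep annotations match that order, so the lock taken first is always the one annotated I_MUTEX_PARENT. The broader discipline the helper implements, sketched with pthreads and address ordering:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Always lock the lower-addressed mutex first, no matter which
 * argument the caller passed first -- the invariant pipe_double_lock()
 * maintains. */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if ((uintptr_t)a > (uintptr_t)b) {
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);	/* "parent" */
	pthread_mutex_lock(b);	/* "child" */
}

int main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	double_lock(&m1, &m2);
	puts("locked in address order");
	pthread_mutex_unlock(&m2);
	pthread_mutex_unlock(&m1);
	return 0;
}
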
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3ce5ae9e..175db25 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -234,23 +234,20 @@ static int check_mem_permission(struct task_struct *task)
struct mm_struct *mm_for_maps(struct task_struct *task)
{
- struct mm_struct *mm = get_task_mm(task);
- if (!mm)
+ struct mm_struct *mm;
+
+ if (mutex_lock_killable(&task->cred_guard_mutex))
return NULL;
- down_read(&mm->mmap_sem);
- task_lock(task);
- if (task->mm != mm)
- goto out;
- if (task->mm != current->mm &&
- __ptrace_may_access(task, PTRACE_MODE_READ) < 0)
- goto out;
- task_unlock(task);
+
+ mm = get_task_mm(task);
+ if (mm && mm != current->mm &&
+ !ptrace_may_access(task, PTRACE_MODE_READ)) {
+ mmput(mm);
+ mm = NULL;
+ }
+ mutex_unlock(&task->cred_guard_mutex);
+
return mm;
-out:
- task_unlock(task);
- up_read(&mm->mmap_sem);
- mmput(mm);
- return NULL;
}
static int proc_pid_cmdline(struct task_struct *task, char * buffer)
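
The rewritten mm_for_maps() above serializes against exec via cred_guard_mutex, takes the mm reference, and drops it again if the ptrace check fails; taking mmap_sem moves out to the callers (the task_mmu.c and task_nommu.c hunks below add the down_read()). A simplified userspace sketch of that acquire-check-release shape, with hypothetical names and the own-mm shortcut elided:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct mm { int refs; };

static pthread_mutex_t cred_guard = PTHREAD_MUTEX_INITIALIZER;

static struct mm *mm_get(struct mm *mm) { if (mm) mm->refs++; return mm; }
static void mm_put(struct mm *mm)	 { mm->refs--; }

static int may_access(void) { return 0; }	/* simulated ptrace denial */

static struct mm *mm_for_maps(struct mm *task_mm)
{
	struct mm *mm;

	if (pthread_mutex_lock(&cred_guard))	/* kernel: killable lock */
		return NULL;
	mm = mm_get(task_mm);
	if (mm && !may_access()) {
		mm_put(mm);			/* undo the reference */
		mm = NULL;
	}
	pthread_mutex_unlock(&cred_guard);
	return mm;				/* caller takes mmap_sem */
}

int main(void)
{
	struct mm task_mm = { .refs = 1 };

	printf("%p\n", (void *)mm_for_maps(&task_mm));	/* (nil): denied */
	printf("refs: %d\n", task_mm.refs);		/* 1: balanced */
	return 0;
}
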
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6f61b7c..9bd8be1 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -119,6 +119,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
mm = mm_for_maps(priv->task);
if (!mm)
return NULL;
+ down_read(&mm->mmap_sem);
tail_vma = get_gate_vma(priv->task);
priv->tail_vma = tail_vma;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 64a72e2..8f5c05d 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -189,6 +189,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
priv->task = NULL;
return NULL;
}
+ down_read(&mm->mmap_sem);
/* start from the Nth VMA */
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 607c579..38f7bd5 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2042,7 +2042,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
* changes */
invalidate_bdev(sb->s_bdev);
}
- mutex_lock(&inode->i_mutex);
mutex_lock(&dqopt->dqonoff_mutex);
if (sb_has_quota_loaded(sb, type)) {
error = -EBUSY;
@@ -2054,9 +2053,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
down_write(&dqopt->dqptr_sem);
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
S_NOQUOTA);
inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
+ mutex_unlock(&inode->i_mutex);
up_write(&dqopt->dqptr_sem);
sb->dq_op->drop(inode);
}
@@ -2080,7 +2081,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
goto out_file_init;
}
mutex_unlock(&dqopt->dqio_mutex);
- mutex_unlock(&inode->i_mutex);
spin_lock(&dq_state_lock);
dqopt->flags |= dquot_state_flag(flags, type);
spin_unlock(&dq_state_lock);
@@ -2094,16 +2094,17 @@ out_file_init:
dqopt->files[type] = NULL;
iput(inode);
out_lock:
- mutex_unlock(&dqopt->dqonoff_mutex);
if (oldflags != -1) {
down_write(&dqopt->dqptr_sem);
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
/* Set the flags back (in the case of accidental quotaon()
* on a wrong file we don't want to mess up the flags) */
inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
inode->i_flags |= oldflags;
+ mutex_unlock(&inode->i_mutex);
up_write(&dqopt->dqptr_sem);
}
- mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&dqopt->dqonoff_mutex);
out_fmt:
put_quota_format(fmt);
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index ebb2c41..11f0c06 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -20,6 +20,7 @@
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
#include "internal.h"
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 77f5bb7..9062220 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -997,7 +997,7 @@ static int reiserfs_async_progress_wait(struct super_block *s)
DEFINE_WAIT(wait);
struct reiserfs_journal *j = SB_JOURNAL(s);
if (atomic_read(&j->j_async_throttle))
- congestion_wait(WRITE, HZ / 10);
+ congestion_wait(BLK_RW_ASYNC, HZ / 10);
return 0;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d3aeb06..7adea74 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -24,7 +24,6 @@
#include <linux/exportfs.h>
#include <linux/quotaops.h>
#include <linux/vfs.h>
-#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/crc32.h>
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index f3d47d8..6925b83 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -46,7 +46,6 @@
#include <linux/reiserfs_acl.h>
#include <asm/uaccess.h>
#include <net/checksum.h>
-#include <linux/smp_lock.h>
#include <linux/stat.h>
#include <linux/quotaops.h>
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 3b52770..cb5fc57 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -30,6 +30,7 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/init.h>
diff --git a/fs/sync.c b/fs/sync.c
index dd20002..3422ba6 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -112,8 +112,13 @@ restart:
mutex_unlock(&mutex);
}
+/*
+ * sync everything. Start out by waking pdflush, because that writes back
+ * all queues in parallel.
+ */
SYSCALL_DEFINE0(sync)
{
+ wakeup_pdflush(0);
sync_filesystems(0);
sync_filesystems(1);
if (unlikely(laptop_mode))
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 9345806..2524714 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -171,6 +171,7 @@ static ssize_t write(struct file *file, const char __user *userbuf,
if (count > 0)
*off = offs + count;
+ kfree(temp);
return count;
}
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index d88d0fa..14f2d71 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -939,8 +939,10 @@ again:
/* Remove from old parent's list and insert into new parent's list. */
sysfs_unlink_sibling(sd);
sysfs_get(new_parent_sd);
+ drop_nlink(old_parent->d_inode);
sysfs_put(sd->s_parent);
sd->s_parent = new_parent_sd;
+ inc_nlink(new_parent->d_inode);
sysfs_link_sibling(sd);
out_unlock:
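
The rename fix keeps directory link counts honest: moving a directory between parents must decrement the old parent's nlink and increment the new one's, because each child directory's ".." entry counts as a link on its parent. The bookkeeping in miniature:

#include <stdio.h>

struct dir { int nlink; };

/* A subdirectory's ".." contributes one link to its parent. */
static void move_subdir(struct dir *old_parent, struct dir *new_parent)
{
	old_parent->nlink--;	/* drop_nlink(old_parent->d_inode) */
	new_parent->nlink++;	/* inc_nlink(new_parent->d_inode)  */
}

int main(void)
{
	struct dir a = { .nlink = 3 };	/* ".", "..", one child */
	struct dir b = { .nlink = 2 };	/* ".", ".." */

	move_subdir(&a, &b);
	printf("a=%d b=%d\n", a.nlink, b.nlink);	/* a=2 b=3 */
	return 0;
}
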
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index bc58571..762a7d6 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -297,6 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
+ dbg_io("jhead %d", wbuf->jhead);
wbuf->need_sync = 1;
wbuf->c->need_wbuf_sync = 1;
ubifs_wake_up_bgt(wbuf->c);
@@ -311,8 +312,12 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
ubifs_assert(!hrtimer_active(&wbuf->timer));
- if (!ktime_to_ns(wbuf->softlimit))
+ if (wbuf->no_timer)
return;
+ dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead,
+ div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
+ div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
+ USEC_PER_SEC));
hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
HRTIMER_MODE_REL);
}
@@ -323,11 +328,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
*/
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
- /*
- * If the syncer is waiting for the lock (from the background thread's
- * context) and another task is changing write-buffer then the syncing
- * should be canceled.
- */
+ if (wbuf->no_timer)
+ return;
wbuf->need_sync = 0;
hrtimer_cancel(&wbuf->timer);
}
@@ -349,8 +351,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
/* Write-buffer is empty or not seeked */
return 0;
- dbg_io("LEB %d:%d, %d bytes",
- wbuf->lnum, wbuf->offs, wbuf->used);
+ dbg_io("LEB %d:%d, %d bytes, jhead %d",
+ wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead);
ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
ubifs_assert(!(wbuf->avail & 7));
ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
@@ -390,7 +392,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
* @offs: logical eraseblock offset to seek to
* @dtype: data type
*
- * This function targets the write buffer to logical eraseblock @lnum:@offs.
+ * This function targets the write-buffer to logical eraseblock @lnum:@offs.
* The write-buffer is synchronized if it is not empty. Returns zero in case of
* success and a negative error code in case of failure.
*/
@@ -399,7 +401,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
{
const struct ubifs_info *c = wbuf->c;
- dbg_io("LEB %d:%d", lnum, offs);
+ dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead);
ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
ubifs_assert(offs >= 0 && offs <= c->leb_size);
ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
@@ -506,9 +508,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
struct ubifs_info *c = wbuf->c;
int err, written, n, aligned_len = ALIGN(len, 8), offs;
- dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len,
- dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum,
- wbuf->offs + wbuf->used);
+ dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len,
+ dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead,
+ wbuf->lnum, wbuf->offs + wbuf->used);
ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
@@ -533,8 +535,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
memcpy(wbuf->buf + wbuf->used, buf, len);
if (aligned_len == wbuf->avail) {
- dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum,
- wbuf->offs);
+ dbg_io("flush jhead %d wbuf to LEB %d:%d",
+ wbuf->jhead, wbuf->lnum, wbuf->offs);
err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
wbuf->offs, c->min_io_size,
wbuf->dtype);
@@ -562,7 +564,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
* minimal I/O unit. We have to fill and flush write-buffer and switch
* to the next min. I/O unit.
*/
- dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs);
+ dbg_io("flush jhead %d wbuf to LEB %d:%d",
+ wbuf->jhead, wbuf->lnum, wbuf->offs);
memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
c->min_io_size, wbuf->dtype);
@@ -695,7 +698,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
int err, rlen, overlap;
struct ubifs_ch *ch = buf;
- dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
+ dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs,
+ dbg_ntype(type), len, wbuf->jhead);
ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
ubifs_assert(!(offs & 7) && offs < c->leb_size);
ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
@@ -819,13 +823,12 @@ out:
* @c: UBIFS file-system description object
* @wbuf: write-buffer to initialize
*
- * This function initializes write buffer. Returns zero in case of success
+ * This function initializes the write-buffer. Returns zero in case of success
 * and %-ENOMEM in case of failure.
*/
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
size_t size;
- ktime_t hardlimit;
wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL);
if (!wbuf->buf)
@@ -851,22 +854,16 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
wbuf->timer.function = wbuf_timer_callback_nolock;
- /*
- * Make write-buffer soft limit to be 20% of the hard limit. The
- * write-buffer timer is allowed to expire any time between the soft
- * and hard limits.
- */
- hardlimit = ktime_set(DEFAULT_WBUF_TIMEOUT_SECS, 0);
- wbuf->delta = (DEFAULT_WBUF_TIMEOUT_SECS * NSEC_PER_SEC) * 2 / 10;
- wbuf->softlimit = ktime_sub_ns(hardlimit, wbuf->delta);
- hrtimer_set_expires_range_ns(&wbuf->timer, wbuf->softlimit,
- wbuf->delta);
+ wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
+ wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
+ wbuf->delta *= 1000000000ULL;
+ ubifs_assert(wbuf->delta <= ULONG_MAX);
return 0;
}
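
The replacement arithmetic is worth spelling out: softlimit is the 3 s soft timeout as a ktime, and delta is the remaining 2 s up to the 5 s hard limit, converted to nanoseconds. As a worked example in plain C:

#define WBUF_TIMEOUT_SOFTLIMIT 3	/* seconds */
#define WBUF_TIMEOUT_HARDLIMIT 5	/* seconds */

static unsigned long long wbuf_delta_ns(void)
{
	unsigned long long d = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;

	return d * 1000000000ULL;	/* 2 s -> 2,000,000,000 ns */
}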
/**
* ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
- * @wbuf: the write-buffer whereto add
+ * @wbuf: the write-buffer where to add
* @inum: the inode number
*
* This function adds an inode number to the inode array of the write-buffer.
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 6db7a6b..8aacd64 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -25,7 +25,6 @@
/* This file implements EXT2-compatible extended attribute ioctl() calls */
#include <linux/compat.h>
-#include <linux/smp_lock.h>
#include <linux/mount.h>
#include "ubifs.h"
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 8056052..e5f6cf8 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -53,6 +53,25 @@ static int is_empty(void *buf, int len)
}
/**
+ * first_non_ff - find offset of the first non-0xff byte.
+ * @buf: buffer to search in
+ * @len: length of buffer
+ *
+ * This function returns the offset of the first non-0xff byte in @buf or %-1 if
+ * the buffer contains only 0xff bytes.
+ */
+static int first_non_ff(void *buf, int len)
+{
+ uint8_t *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (*p++ != 0xff)
+ return i;
+ return -1;
+}
+
+/**
* get_master_node - get the last valid master node allowing for corruption.
* @c: UBIFS file-system description object
* @lnum: LEB number
@@ -357,11 +376,7 @@ static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
empty_offs = ALIGN(offs + 1, c->min_io_size);
check_len = c->leb_size - empty_offs;
p = buf + empty_offs - offs;
-
- for (; check_len > 0; check_len--)
- if (*p++ != 0xff)
- return 0;
- return 1;
+ return is_empty(p, check_len);
}
/**
@@ -543,8 +558,8 @@ static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs)
*
* This function does a scan of a LEB, but caters for errors that might have
* been caused by the unclean unmount from which we are attempting to recover.
- *
- * This function returns %0 on success and a negative error code on failure.
+ * Returns %0 in case of success, %-EUCLEAN if an unrecoverable corruption is
+ * found, and a negative error code in case of failure.
*/
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf, int grouped)
@@ -643,7 +658,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
goto corrupted;
default:
dbg_err("unknown");
- goto corrupted;
+ err = -EINVAL;
+ goto error;
}
}
@@ -652,8 +668,13 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
clean_buf(c, &buf, lnum, &offs, &len);
need_clean = 1;
} else {
- ubifs_err("corrupt empty space at LEB %d:%d",
- lnum, offs);
+ int corruption = first_non_ff(buf, len);
+
+ ubifs_err("corrupt empty space LEB %d:%d, corruption "
+ "starts at %d", lnum, offs, corruption);
+ /* Make sure we dump interesting non-0xFF data */
+ offs = corruption;
+ buf += corruption;
goto corrupted;
}
}
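
first_non_ff(), added earlier in this patch, lets the error path skip the leading run of erased 0xFF bytes so the hex dump begins at the first interesting byte. A slightly defensive variant of the same move (the patch itself assumes corruption was found; first_non_ff() returns -1 for an all-0xff buffer):

/* Sketch: advance a dump window to the first corrupt byte. */
static void skip_leading_ff(void **buf, int *offs, int *len)
{
	int corruption = first_non_ff(*buf, *len);

	if (corruption > 0) {
		*buf = (char *)*buf + corruption;
		*offs += corruption;
		*len -= corruption;
	}
}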
@@ -813,7 +834,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
static int recover_head(const struct ubifs_info *c, int lnum, int offs,
void *sbuf)
{
- int len, err, need_clean = 0;
+ int len, err;
if (c->min_io_size > 1)
len = c->min_io_size;
@@ -827,19 +848,7 @@ static int recover_head(const struct ubifs_info *c, int lnum, int offs,
/* Read at the head location and check it is empty flash */
err = ubi_read(c->ubi, lnum, sbuf, offs, len);
- if (err)
- need_clean = 1;
- else {
- uint8_t *p = sbuf;
-
- while (len--)
- if (*p++ != 0xff) {
- need_clean = 1;
- break;
- }
- }
-
- if (need_clean) {
+ if (err || !is_empty(sbuf, len)) {
dbg_rcvry("cleaning head at %d:%d", lnum, offs);
if (offs == 0)
return ubifs_leb_unmap(c, lnum);
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 11cc801..2970500 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -837,9 +837,10 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
dbg_mnt("replay log LEB %d:%d", lnum, offs);
sleb = ubifs_scan(c, lnum, offs, sbuf);
- if (IS_ERR(sleb)) {
- if (c->need_recovery)
- sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
+ if (IS_ERR(sleb)) {
+ if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
+ return PTR_ERR(sleb);
+ sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
if (IS_ERR(sleb))
return PTR_ERR(sleb);
}
@@ -957,7 +958,7 @@ out:
return err;
out_dump:
- ubifs_err("log error detected while replying the log at LEB %d:%d",
+ ubifs_err("log error detected while replaying the log at LEB %d:%d",
lnum, offs + snod->offs);
dbg_dump_node(c, snod->node);
ubifs_scan_destroy(sleb);
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 0ed8247..892ebfe 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -238,12 +238,12 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
{
int len;
- ubifs_err("corrupted data at LEB %d:%d", lnum, offs);
+ ubifs_err("corruption at LEB %d:%d", lnum, offs);
if (dbg_failure_mode)
return;
len = c->leb_size - offs;
- if (len > 4096)
- len = 4096;
+ if (len > 8192)
+ len = 8192;
dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1);
}
@@ -256,7 +256,9 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
* @sbuf: scan buffer (must be c->leb_size)
*
* This function scans LEB number @lnum and returns complete information about
- * its contents. Returns an error code in case of failure.
+ * its contents. Returns the scanned information in case of success,
+ * %-EUCLEAN if the LEB needs recovery, and other negative error codes in case
+ * of failure.
*/
struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
int offs, void *sbuf)
@@ -279,7 +281,6 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
cond_resched();
ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
-
if (ret > 0) {
/* Padding bytes or a valid padding node */
offs += ret;
@@ -304,7 +305,8 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
goto corrupted;
default:
dbg_err("unknown");
- goto corrupted;
+ err = -EINVAL;
+ goto error;
}
err = ubifs_add_snod(c, sleb, buf, offs);
@@ -317,8 +319,10 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
len -= node_len;
}
- if (offs % c->min_io_size)
- goto corrupted;
+ if (offs % c->min_io_size) {
+ ubifs_err("empty space starts at non-aligned offset %d", offs);
+ goto corrupted;
+ }
ubifs_end_scan(c, sleb, lnum, offs);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 79fad43..26d2e0d 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -797,7 +797,7 @@ static int alloc_wbufs(struct ubifs_info *c)
* does not need to be synchronized by timer.
*/
c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
- c->jheads[GCHD].wbuf.softlimit = ktime_set(0, 0);
+ c->jheads[GCHD].wbuf.no_timer = 1;
return 0;
}
@@ -986,7 +986,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
switch (token) {
/*
* %Opt_fast_unmount and %Opt_norm_unmount options are ignored.
- * We accepte them in order to be backware-compatible. But this
+ * We accept them in order to be backward-compatible. But this
* should be removed at some point.
*/
case Opt_fast_unmount:
@@ -1287,6 +1287,9 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_journal;
+ /* Calculate 'min_idx_lebs' after journal replay */
+ c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);
+
err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
if (err)
goto out_orphans;
@@ -1754,10 +1757,8 @@ static void ubifs_put_super(struct super_block *sb)
/* Synchronize write-buffers */
if (c->jheads)
- for (i = 0; i < c->jhead_cnt; i++) {
+ for (i = 0; i < c->jhead_cnt; i++)
ubifs_wbuf_sync(&c->jheads[i].wbuf);
- hrtimer_cancel(&c->jheads[i].wbuf.timer);
- }
/*
* On fatal errors c->ro_media is set to 1, in which case we do
@@ -1975,7 +1976,8 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
err = bdi_init(&c->bdi);
if (err)
goto out_close;
- err = bdi_register(&c->bdi, NULL, "ubifs");
+ err = bdi_register(&c->bdi, NULL, "ubifs_%d_%d",
+ c->vi.ubi_num, c->vi.vol_id);
if (err)
goto out_bdi;
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 1bf01d8..a293490 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -95,8 +95,9 @@
*/
#define BGT_NAME_PATTERN "ubifs_bgt%d_%d"
-/* Default write-buffer synchronization timeout in seconds */
-#define DEFAULT_WBUF_TIMEOUT_SECS 5
+/* Write-buffer synchronization timeout interval in seconds */
+#define WBUF_TIMEOUT_SOFTLIMIT 3
+#define WBUF_TIMEOUT_HARDLIMIT 5
/* Maximum possible inode number (only 32-bit inodes are supported now) */
#define MAX_INUM 0xFFFFFFFF
@@ -654,7 +655,8 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
* @delta: hard and soft timeouts delta (the timer expire interval is @softlimit
* and @softlimit + @delta)
* @timer: write-buffer timer
- * @need_sync: it is set if its timer expired and needs sync
+ * @no_timer: non-zero if this write-buffer does not have a timer
+ * @need_sync: non-zero if the timer expired and the wbuf needs sync'ing
* @next_ino: points to the next position of the following inode number
* @inodes: stores the inode numbers of the nodes which are in wbuf
*
@@ -683,7 +685,8 @@ struct ubifs_wbuf {
ktime_t softlimit;
unsigned long long delta;
struct hrtimer timer;
- int need_sync;
+ unsigned int no_timer:1;
+ unsigned int need_sync:1;
int next_ino;
ino_t *inodes;
};
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 6832135..9d1b8c2 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1087,11 +1087,23 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
struct udf_inode_info *vati;
uint32_t pos;
struct virtualAllocationTable20 *vat20;
+ sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
/* VAT file entry is in the last recorded block */
ino.partitionReferenceNum = type1_index;
ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
sbi->s_vat_inode = udf_iget(sb, &ino);
+ if (!sbi->s_vat_inode &&
+ sbi->s_last_block != blocks - 1) {
+ printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
+ " last recorded block (%lu), retrying with the last "
+ "block of the device (%lu).\n",
+ (unsigned long)sbi->s_last_block,
+ (unsigned long)blocks - 1);
+ ino.partitionReferenceNum = type1_index;
+ ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
+ sbi->s_vat_inode = udf_iget(sb, &ino);
+ }
if (!sbi->s_vat_inode)
return 1;
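
The retry encodes a simple fallback: the VAT file entry is expected in the last recorded block, but if that read fails and the recorded value disagrees with the device size, the last block of the device is tried as a second guess. Schematically (read_vat_at() is a hypothetical helper):

struct super_block;
struct inode;
struct inode *read_vat_at(struct super_block *sb, unsigned long block);

static struct inode *find_vat(struct super_block *sb,
			      unsigned long recorded_last,
			      unsigned long dev_last)
{
	struct inode *vat = read_vat_at(sb, recorded_last);

	if (!vat && recorded_last != dev_last)
		vat = read_vat_at(sb, dev_last);	/* fallback guess */
	return vat;
}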
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 1cd3b55..2d3f90a 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -53,7 +53,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
printk(KERN_ERR "XFS: possible memory allocation "
"deadlock in %s (mode:0x%x)\n",
__func__, lflags);
- congestion_wait(WRITE, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
} while (1);
}
@@ -130,7 +130,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
printk(KERN_ERR "XFS: possible memory allocation "
"deadlock in %s (mode:0x%x)\n",
__func__, lflags);
- congestion_wait(WRITE, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
} while (1);
}
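
Both call sites follow the same retry discipline: when an allocation that may fail does fail, log the situation, wait roughly 20 ms (HZ/50 jiffies) for async block-device congestion to ease, and try again rather than returning failure to the caller. The loop in sketch form (try_alloc() is a hypothetical stand-in for kmalloc and friends; congestion_wait() is the real call):

#include <linux/backing-dev.h>	/* congestion_wait() */

void *try_alloc(size_t size);	/* hypothetical */

static void *alloc_retry(size_t size)
{
	void *p;

	do {
		p = try_alloc(size);
		if (p)
			return p;
		/* Allocator under pressure: nap on the async queue,
		 * then retry instead of failing the caller. */
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
	} while (1);
}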
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 7ec89fc..aecf251 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1268,6 +1268,14 @@ xfs_vm_writepage(
if (!page_has_buffers(page))
create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+
+ /*
+ * VM calculation for nr_to_write seems off. Bump it way
+ * up, this gets simple streaming writes zippy again.
+ * To be reviewed again after Jens' writeback changes.
+ */
+ wbc->nr_to_write *= 4;
+
/*
* Convert delayed allocate, unwritten or unmapped space
* to real space and flush out to disk.
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 1418b91..965df12 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -412,7 +412,7 @@ _xfs_buf_lookup_pages(
XFS_STATS_INC(xb_page_retries);
xfsbufd_wakeup(0, gfp_mask);
- congestion_wait(WRITE, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
@@ -770,7 +770,7 @@ xfs_buf_associate_memory(
bp->b_pages = NULL;
bp->b_addr = mem;
- rval = _xfs_buf_get_pages(bp, page_count, 0);
+ rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
if (rval)
return rval;
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f4e2554..0542fd5 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -41,7 +41,6 @@
#include "xfs_ioctl.h"
#include <linux/dcache.h>
-#include <linux/smp_lock.h>
static struct vm_operations_struct xfs_file_vm_ops;
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 58973bb..8070b34 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -680,8 +680,8 @@ xfs_vn_fiemap(
else
bm.bmv_length = BTOBB(length);
- /* our formatter will tell xfs_getbmap when to stop. */
- bm.bmv_count = MAXEXTNUM;
+ /* We add one because in getbmap world count includes the header */
+ bm.bmv_count = fieinfo->fi_extents_max + 1;
bm.bmv_iflags = BMV_IF_PREALLOC;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
bm.bmv_iflags |= BMV_IF_ATTRFORK;
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index db15feb..4ece190 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -2010,7 +2010,9 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
- blkcnt, XFS_BUF_LOCK, &bp);
+ blkcnt,
+ XFS_BUF_LOCK | XBF_DONT_BLOCK,
+ &bp);
if (error)
return(error);
@@ -2141,8 +2143,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
- bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno,
- blkcnt, XFS_BUF_LOCK);
+ bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, blkcnt,
+ XFS_BUF_LOCK | XBF_DONT_BLOCK);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 7928b99..8ee5b5a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -6009,7 +6009,7 @@ xfs_getbmap(
*/
error = ENOMEM;
subnex = 16;
- map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL);
+ map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
if (!map)
goto out_unlock_ilock;
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index e9df995..2671738 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -120,8 +120,8 @@ xfs_btree_check_sblock(
XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
if (bp)
xfs_buftrace("SBTREE ERROR", bp);
- XFS_ERROR_REPORT("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW,
- cur->bc_mp);
+ XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
+ XFS_ERRLEVEL_LOW, cur->bc_mp, block);
return XFS_ERROR(EFSCORRUPTED);
}
return 0;
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 9ff6e57..2847bbc 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -2201,7 +2201,7 @@ kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
- return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP);
+ return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
/*
@@ -2261,9 +2261,9 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
int off;
if (nbuf == 1)
- dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP);
+ dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
else
- dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP);
+ dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
dabuf->dirty = 0;
#ifdef XFS_DABUF_DEBUG
dabuf->ra = ra;
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index c657bec..bb1d58eb 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -256,7 +256,7 @@ xfs_dir_cilookup_result(
!(args->op_flags & XFS_DA_OP_CILOOKUP))
return EEXIST;
- args->value = kmem_alloc(len, KM_MAYFAIL);
+ args->value = kmem_alloc(len, KM_NOFS | KM_MAYFAIL);
if (!args->value)
return ENOMEM;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index cbd451b..2d0b3e1 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -167,17 +167,25 @@ xfs_growfs_data_private(
new = nb - mp->m_sb.sb_dblocks;
oagcount = mp->m_sb.sb_agcount;
if (nagcount > oagcount) {
+ void *new_perag, *old_perag;
+
xfs_filestream_flush(mp);
+
+ new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount,
+ KM_MAYFAIL);
+ if (!new_perag)
+ return XFS_ERROR(ENOMEM);
+
down_write(&mp->m_peraglock);
- mp->m_perag = kmem_realloc(mp->m_perag,
- sizeof(xfs_perag_t) * nagcount,
- sizeof(xfs_perag_t) * oagcount,
- KM_SLEEP);
- memset(&mp->m_perag[oagcount], 0,
- (nagcount - oagcount) * sizeof(xfs_perag_t));
+ memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount);
+ old_perag = mp->m_perag;
+ mp->m_perag = new_perag;
+
mp->m_flags |= XFS_MOUNT_32BITINODES;
nagimax = xfs_initialize_perag(mp, nagcount);
up_write(&mp->m_peraglock);
+
+ kmem_free(old_perag);
}
tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
tp->t_flags |= XFS_TRANS_RESERVE;
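
The rewrite replaces an in-place kmem_realloc() under m_peraglock with allocate-copy-swap-free: the larger array is allocated first, where failure is harmless, the pointer is swapped under the write lock, and the old array is freed only after the lock is dropped. The generic pattern, as a sketch:

#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: grow an array guarded by @lock without reallocating in place. */
static int grow_array(void **slot, size_t old_sz, size_t new_sz,
		      struct rw_semaphore *lock)
{
	void *new_buf, *old_buf;

	new_buf = kzalloc(new_sz, GFP_KERNEL);	/* may fail; nothing held yet */
	if (!new_buf)
		return -ENOMEM;
	down_write(lock);
	memcpy(new_buf, *slot, old_sz);		/* carry the old contents over */
	old_buf = *slot;
	*slot = new_buf;
	up_write(lock);
	kfree(old_buf);				/* readers now only see the new array */
	return 0;
}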
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 5fcec6f..34ec869 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -64,6 +64,10 @@ xfs_inode_alloc(
ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
if (!ip)
return NULL;
+ if (inode_init_always(mp->m_super, VFS_I(ip))) {
+ kmem_zone_free(xfs_inode_zone, ip);
+ return NULL;
+ }
ASSERT(atomic_read(&ip->i_iocount) == 0);
ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -105,17 +109,6 @@ xfs_inode_alloc(
#ifdef XFS_DIR2_TRACE
ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif
- /*
- * Now initialise the VFS inode. We do this after the xfs_inode
- * initialisation as internal failures will result in ->destroy_inode
- * being called and that will pass down through the reclaim path and
- * free the XFS inode. This path requires the XFS inode to already be
- * initialised. Hence if this call fails, the xfs_inode has already
- * been freed and we should not reference it at all in the error
- * handling.
- */
- if (!inode_init_always(mp->m_super, VFS_I(ip)))
- return NULL;
/* prevent anyone from using this yet */
VFS_I(ip)->i_state = I_NEW|I_LOCK;
@@ -123,6 +116,71 @@ xfs_inode_alloc(
return ip;
}
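
Note the reordering: the VFS-side inode_init_always() now runs right after the zone allocation, before any XFS-side setup, so a failure can be unwound with a plain free instead of a trip through inode reclaim (the flipped test in xfs_iget_cache_hit below reflects that inode_init_always() now returns 0 on success). The shape of the fix, with hypothetical names:

struct xfs_inode;
struct xfs_inode *zone_alloc(void);	/* hypothetical */
void zone_free(struct xfs_inode *ip);	/* hypothetical */
int vfs_init(struct xfs_inode *ip);	/* hypothetical; 0 on success */

static struct xfs_inode *alloc_inode_sketch(void)
{
	struct xfs_inode *ip = zone_alloc();

	if (!ip)
		return NULL;
	/* Do the easily-unwound part first. */
	if (vfs_init(ip)) {
		zone_free(ip);		/* one-step cleanup */
		return NULL;
	}
	/* ... heavier XFS-side initialization follows ... */
	return ip;
}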
+STATIC void
+xfs_inode_free(
+ struct xfs_inode *ip)
+{
+ switch (ip->i_d.di_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFLNK:
+ xfs_idestroy_fork(ip, XFS_DATA_FORK);
+ break;
+ }
+
+ if (ip->i_afp)
+ xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+
+#ifdef XFS_INODE_TRACE
+ ktrace_free(ip->i_trace);
+#endif
+#ifdef XFS_BMAP_TRACE
+ ktrace_free(ip->i_xtrace);
+#endif
+#ifdef XFS_BTREE_TRACE
+ ktrace_free(ip->i_btrace);
+#endif
+#ifdef XFS_RW_TRACE
+ ktrace_free(ip->i_rwtrace);
+#endif
+#ifdef XFS_ILOCK_TRACE
+ ktrace_free(ip->i_lock_trace);
+#endif
+#ifdef XFS_DIR2_TRACE
+ ktrace_free(ip->i_dir_trace);
+#endif
+
+ if (ip->i_itemp) {
+ /*
+ * Only if we are shutting down the fs will we see an
+ * inode still in the AIL. If it is there, we should remove
+ * it to prevent a use-after-free from occurring.
+ */
+ xfs_log_item_t *lip = &ip->i_itemp->ili_item;
+ struct xfs_ail *ailp = lip->li_ailp;
+
+ ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
+ XFS_FORCED_SHUTDOWN(ip->i_mount));
+ if (lip->li_flags & XFS_LI_IN_AIL) {
+ spin_lock(&ailp->xa_lock);
+ if (lip->li_flags & XFS_LI_IN_AIL)
+ xfs_trans_ail_delete(ailp, lip);
+ else
+ spin_unlock(&ailp->xa_lock);
+ }
+ xfs_inode_item_destroy(ip);
+ ip->i_itemp = NULL;
+ }
+
+ /* asserts to verify all state is correct here */
+ ASSERT(atomic_read(&ip->i_iocount) == 0);
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+ ASSERT(!spin_is_locked(&ip->i_flags_lock));
+ ASSERT(completion_done(&ip->i_flush));
+
+ kmem_zone_free(xfs_inode_zone, ip);
+}
+
/*
* Check the validity of the inode we just found in the cache
*/
@@ -167,7 +225,7 @@ xfs_iget_cache_hit(
* errors cleanly, then tag it so it can be set up correctly
* later.
*/
- if (!inode_init_always(mp->m_super, VFS_I(ip))) {
+ if (inode_init_always(mp->m_super, VFS_I(ip))) {
error = ENOMEM;
goto out_error;
}
@@ -299,7 +357,8 @@ out_preload_end:
if (lock_flags)
xfs_iunlock(ip, lock_flags);
out_destroy:
- xfs_destroy_inode(ip);
+ __destroy_inode(VFS_I(ip));
+ xfs_inode_free(ip);
return error;
}
@@ -504,62 +563,7 @@ xfs_ireclaim(
xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- switch (ip->i_d.di_mode & S_IFMT) {
- case S_IFREG:
- case S_IFDIR:
- case S_IFLNK:
- xfs_idestroy_fork(ip, XFS_DATA_FORK);
- break;
- }
-
- if (ip->i_afp)
- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
-#ifdef XFS_INODE_TRACE
- ktrace_free(ip->i_trace);
-#endif
-#ifdef XFS_BMAP_TRACE
- ktrace_free(ip->i_xtrace);
-#endif
-#ifdef XFS_BTREE_TRACE
- ktrace_free(ip->i_btrace);
-#endif
-#ifdef XFS_RW_TRACE
- ktrace_free(ip->i_rwtrace);
-#endif
-#ifdef XFS_ILOCK_TRACE
- ktrace_free(ip->i_lock_trace);
-#endif
-#ifdef XFS_DIR2_TRACE
- ktrace_free(ip->i_dir_trace);
-#endif
- if (ip->i_itemp) {
- /*
- * Only if we are shutting down the fs will we see an
- * inode still in the AIL. If it is there, we should remove
- * it to prevent a use-after-free from occurring.
- */
- xfs_log_item_t *lip = &ip->i_itemp->ili_item;
- struct xfs_ail *ailp = lip->li_ailp;
-
- ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
- XFS_FORCED_SHUTDOWN(ip->i_mount));
- if (lip->li_flags & XFS_LI_IN_AIL) {
- spin_lock(&ailp->xa_lock);
- if (lip->li_flags & XFS_LI_IN_AIL)
- xfs_trans_ail_delete(ailp, lip);
- else
- spin_unlock(&ailp->xa_lock);
- }
- xfs_inode_item_destroy(ip);
- ip->i_itemp = NULL;
- }
- /* asserts to verify all state is correct here */
- ASSERT(atomic_read(&ip->i_iocount) == 0);
- ASSERT(atomic_read(&ip->i_pincount) == 0);
- ASSERT(!spin_is_locked(&ip->i_flags_lock));
- ASSERT(completion_done(&ip->i_flush));
- kmem_zone_free(xfs_inode_zone, ip);
+ xfs_inode_free(ip);
}
/*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1f22d65..da428b3 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -343,6 +343,16 @@ xfs_iformat(
return XFS_ERROR(EFSCORRUPTED);
}
+ if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
+ !ip->i_mount->m_rtdev_targp)) {
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt dinode %Lu, has realtime flag set.",
+ ip->i_ino);
+ XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
+ XFS_ERRLEVEL_LOW, ip->i_mount, dip);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
switch (ip->i_d.di_mode & S_IFMT) {
case S_IFIFO:
case S_IFCHR:
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1804f86..65f24a3 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -310,23 +310,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
}
/*
- * Get rid of a partially initialized inode.
- *
- * We have to go through destroy_inode to make sure allocations
- * from init_inode_always like the security data are undone.
- *
- * We mark the inode bad so that it takes the short cut in
- * the reclaim path instead of going through the flush path
- * which doesn't make sense for an inode that has never seen the
- * light of day.
- */
-static inline void xfs_destroy_inode(struct xfs_inode *ip)
-{
- make_bad_inode(VFS_I(ip));
- return destroy_inode(VFS_I(ip));
-}
-
-/*
* i_flags helper functions
*/
static inline void
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 3750f04..9dbdff3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3180,7 +3180,7 @@ try_again:
STATIC void
xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
{
- ASSERT(spin_is_locked(&log->l_icloglock));
+ assert_spin_locked(&log->l_icloglock);
if (iclog->ic_state == XLOG_STATE_ACTIVE) {
xlog_state_switch_iclogs(log, iclog, 0);
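
The swap from ASSERT(spin_is_locked()) to assert_spin_locked() matters on uniprocessor builds, where spinlocks compile away and spin_is_locked() is constantly 0, so the old assertion would fire even when the lock was held. A minimal illustration:

#include <linux/spinlock.h>

static void check_owner(spinlock_t *lock)
{
	/* Safe on SMP and UP alike: degrades to a no-op where
	 * the lock state cannot be observed. */
	assert_spin_locked(lock);
}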
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index c4eca5e..492d75b 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -538,7 +538,9 @@ xfs_readlink_bmap(
d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
- bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0);
+ bp = xfs_buf_read_flags(mp->m_ddev_targp, d, BTOBB(byte_cnt),
+ XBF_LOCK | XBF_MAPPED |
+ XBF_DONT_BLOCK);
error = XFS_BUF_GETERROR(bp);
if (error) {
xfs_ioerror_alert("xfs_readlink",