author    Jens Axboe <jens.axboe@oracle.com>  2010-05-21 21:27:26 +0200
committer Jens Axboe <jens.axboe@oracle.com>  2010-05-21 21:27:26 +0200
commit    ee9a3607fb03e804ddf624544105f4e34260c380 (patch)
tree      ce41b6e0fa10982a306f6c142a92dbf3c9961284 /fs
parent    b492e95be0ae672922f4734acf3f5d35c30be948 (diff)
parent    d515e86e639890b33a09390d062b0831664f04a2 (diff)
Merge branch 'master' into for-2.6.35
Conflicts:
        fs/ext3/fsync.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/autofs4/root.c | 5
-rw-r--r--  fs/btrfs/ioctl.c | 5
-rw-r--r--  fs/cachefiles/internal.h | 1
-rw-r--r--  fs/cachefiles/namei.c | 98
-rw-r--r--  fs/cachefiles/security.c | 4
-rw-r--r--  fs/ceph/addr.c | 8
-rw-r--r--  fs/ceph/auth.c | 1
-rw-r--r--  fs/ceph/auth_none.h | 2
-rw-r--r--  fs/ceph/auth_x.c | 32
-rw-r--r--  fs/ceph/caps.c | 21
-rw-r--r--  fs/ceph/dir.c | 9
-rw-r--r--  fs/ceph/file.c | 3
-rw-r--r--  fs/ceph/inode.c | 8
-rw-r--r--  fs/ceph/mds_client.c | 34
-rw-r--r--  fs/ceph/messenger.c | 39
-rw-r--r--  fs/ceph/messenger.h | 1
-rw-r--r--  fs/ceph/osd_client.c | 26
-rw-r--r--  fs/ceph/osd_client.h | 3
-rw-r--r--  fs/ceph/osdmap.c | 29
-rw-r--r--  fs/ceph/osdmap.h | 2
-rw-r--r--  fs/ceph/rados.h | 1
-rw-r--r--  fs/ceph/snap.c | 24
-rw-r--r--  fs/ceph/super.c | 30
-rw-r--r--  fs/ceph/super.h | 1
-rw-r--r--  fs/cifs/asn1.c | 103
-rw-r--r--  fs/cifs/cifs_debug.c | 48
-rw-r--r--  fs/cifs/cifs_debug.h | 42
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 34
-rw-r--r--  fs/cifs/cifs_spnego.c | 6
-rw-r--r--  fs/cifs/cifs_unicode.c | 5
-rw-r--r--  fs/cifs/cifsacl.c | 76
-rw-r--r--  fs/cifs/cifsencrypt.c | 10
-rw-r--r--  fs/cifs/cifsfs.c | 163
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifsglob.h | 10
-rw-r--r--  fs/cifs/cifsproto.h | 30
-rw-r--r--  fs/cifs/cifssmb.c | 437
-rw-r--r--  fs/cifs/connect.c | 639
-rw-r--r--  fs/cifs/dir.c | 91
-rw-r--r--  fs/cifs/dns_resolve.c | 16
-rw-r--r--  fs/cifs/export.c | 2
-rw-r--r--  fs/cifs/file.c | 222
-rw-r--r--  fs/cifs/inode.c | 144
-rw-r--r--  fs/cifs/ioctl.c | 10
-rw-r--r--  fs/cifs/link.c | 10
-rw-r--r--  fs/cifs/misc.c | 81
-rw-r--r--  fs/cifs/netmisc.c | 16
-rw-r--r--  fs/cifs/readdir.c | 85
-rw-r--r--  fs/cifs/sess.c | 81
-rw-r--r--  fs/cifs/transport.c | 92
-rw-r--r--  fs/cifs/xattr.c | 40
-rw-r--r--  fs/compat.c | 2
-rw-r--r--  fs/compat_ioctl.c | 3
-rw-r--r--  fs/configfs/dir.c | 4
-rw-r--r--  fs/dlm/lock.c | 5
-rw-r--r--  fs/dlm/user.c | 88
-rw-r--r--  fs/eventpoll.c | 3
-rw-r--r--  fs/exec.c | 2
-rw-r--r--  fs/exofs/exofs.h | 2
-rw-r--r--  fs/ext2/balloc.c | 6
-rw-r--r--  fs/ext2/ialloc.c | 9
-rw-r--r--  fs/ext2/inode.c | 7
-rw-r--r--  fs/ext2/super.c | 99
-rw-r--r--  fs/ext2/xattr.c | 2
-rw-r--r--  fs/ext3/balloc.c | 6
-rw-r--r--  fs/ext3/fsync.c | 20
-rw-r--r--  fs/ext3/inode.c | 2
-rw-r--r--  fs/ext3/super.c | 77
-rw-r--r--  fs/ext4/inode.c | 2
-rw-r--r--  fs/fcntl.c | 66
-rw-r--r--  fs/gfs2/aops.c | 8
-rw-r--r--  fs/gfs2/bmap.c | 17
-rw-r--r--  fs/gfs2/dir.c | 2
-rw-r--r--  fs/gfs2/export.c | 2
-rw-r--r--  fs/gfs2/glock.c | 3
-rw-r--r--  fs/gfs2/incore.h | 11
-rw-r--r--  fs/gfs2/inode.c | 101
-rw-r--r--  fs/gfs2/inode.h | 5
-rw-r--r--  fs/gfs2/log.c | 158
-rw-r--r--  fs/gfs2/log.h | 1
-rw-r--r--  fs/gfs2/lops.c | 2
-rw-r--r--  fs/gfs2/main.c | 2
-rw-r--r--  fs/gfs2/meta_io.c | 5
-rw-r--r--  fs/gfs2/ops_fstype.c | 19
-rw-r--r--  fs/gfs2/quota.c | 114
-rw-r--r--  fs/gfs2/rgrp.c | 68
-rw-r--r--  fs/gfs2/super.c | 11
-rw-r--r--  fs/gfs2/sys.c | 6
-rw-r--r--  fs/gfs2/trans.c | 18
-rw-r--r--  fs/inode.c | 2
-rw-r--r--  fs/jbd/commit.c | 8
-rw-r--r--  fs/jbd/journal.c | 33
-rw-r--r--  fs/jbd2/journal.c | 2
-rw-r--r--  fs/jffs2/background.c | 3
-rw-r--r--  fs/jffs2/erase.c | 12
-rw-r--r--  fs/jffs2/fs.c | 10
-rw-r--r--  fs/jffs2/gc.c | 17
-rw-r--r--  fs/jffs2/nodelist.h | 10
-rw-r--r--  fs/jffs2/nodemgmt.c | 28
-rw-r--r--  fs/jffs2/os-linux.h | 3
-rw-r--r--  fs/jffs2/scan.c | 4
-rw-r--r--  fs/jffs2/super.c | 2
-rw-r--r--  fs/jffs2/wbuf.c | 8
-rw-r--r--  fs/jfs/file.c | 2
-rw-r--r--  fs/jfs/jfs_dmap.c | 2
-rw-r--r--  fs/jfs/super.c | 13
-rw-r--r--  fs/libfs.c | 35
-rw-r--r--  fs/logfs/dev_bdev.c | 6
-rw-r--r--  fs/logfs/dev_mtd.c | 26
-rw-r--r--  fs/logfs/dir.c | 2
-rw-r--r--  fs/logfs/file.c | 16
-rw-r--r--  fs/logfs/gc.c | 49
-rw-r--r--  fs/logfs/inode.c | 6
-rw-r--r--  fs/logfs/journal.c | 7
-rw-r--r--  fs/logfs/logfs.h | 15
-rw-r--r--  fs/logfs/logfs_abi.h | 10
-rw-r--r--  fs/logfs/readwrite.c | 19
-rw-r--r--  fs/logfs/segment.c | 7
-rw-r--r--  fs/logfs/super.c | 22
-rw-r--r--  fs/namei.c | 27
-rw-r--r--  fs/namespace.c | 19
-rw-r--r--  fs/nfs/client.c | 57
-rw-r--r--  fs/nfs/delegation.c | 88
-rw-r--r--  fs/nfs/dir.c | 145
-rw-r--r--  fs/nfs/file.c | 15
-rw-r--r--  fs/nfs/fscache.c | 3
-rw-r--r--  fs/nfs/getroot.c | 191
-rw-r--r--  fs/nfs/inode.c | 58
-rw-r--r--  fs/nfs/internal.h | 4
-rw-r--r--  fs/nfs/iostat.h | 6
-rw-r--r--  fs/nfs/namespace.c | 20
-rw-r--r--  fs/nfs/nfs3acl.c | 23
-rw-r--r--  fs/nfs/nfs3proc.c | 128
-rw-r--r--  fs/nfs/nfs3xdr.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 8
-rw-r--r--  fs/nfs/nfs4namespace.c | 12
-rw-r--r--  fs/nfs/nfs4proc.c | 177
-rw-r--r--  fs/nfs/nfs4state.c | 36
-rw-r--r--  fs/nfs/nfs4xdr.c | 22
-rw-r--r--  fs/nfs/nfsroot.c | 14
-rw-r--r--  fs/nfs/pagelist.c | 14
-rw-r--r--  fs/nfs/proc.c | 144
-rw-r--r--  fs/nfs/read.c | 4
-rw-r--r--  fs/nfs/super.c | 150
-rw-r--r--  fs/nfs/unlink.c | 4
-rw-r--r--  fs/nfs/write.c | 55
-rw-r--r--  fs/nfsd/export.c | 44
-rw-r--r--  fs/nfsd/nfs4callback.c | 140
-rw-r--r--  fs/nfsd/nfs4proc.c | 50
-rw-r--r--  fs/nfsd/nfs4state.c | 376
-rw-r--r--  fs/nfsd/nfs4xdr.c | 27
-rw-r--r--  fs/nfsd/nfsctl.c | 64
-rw-r--r--  fs/nfsd/nfsd.h | 6
-rw-r--r--  fs/nfsd/nfssvc.c | 2
-rw-r--r--  fs/nfsd/state.h | 47
-rw-r--r--  fs/nfsd/vfs.c | 8
-rw-r--r--  fs/nfsd/vfs.h | 1
-rw-r--r--  fs/nfsd/xdr4.h | 11
-rw-r--r--  fs/nilfs2/alloc.c | 154
-rw-r--r--  fs/nilfs2/alloc.h | 7
-rw-r--r--  fs/nilfs2/btree.c | 91
-rw-r--r--  fs/nilfs2/btree.h | 23
-rw-r--r--  fs/nilfs2/inode.c | 4
-rw-r--r--  fs/nilfs2/recovery.c | 2
-rw-r--r--  fs/nilfs2/segbuf.c | 70
-rw-r--r--  fs/nilfs2/segbuf.h | 10
-rw-r--r--  fs/nilfs2/segment.c | 157
-rw-r--r--  fs/nilfs2/segment.h | 6
-rw-r--r--  fs/nilfs2/super.c | 219
-rw-r--r--  fs/nilfs2/the_nilfs.c | 10
-rw-r--r--  fs/notify/inotify/Kconfig | 1
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 2
-rw-r--r--  fs/notify/inotify/inotify_user.c | 16
-rw-r--r--  fs/ocfs2/Makefile | 1
-rw-r--r--  fs/ocfs2/alloc.c | 908
-rw-r--r--  fs/ocfs2/alloc.h | 12
-rw-r--r--  fs/ocfs2/aops.c | 3
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 2
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 1
-rw-r--r--  fs/ocfs2/cluster/masklog.h | 1
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 3
-rw-r--r--  fs/ocfs2/dir.c | 75
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 13
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 4
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 28
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 6
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 30
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 27
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c | 16
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 3
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c | 14
-rw-r--r--  fs/ocfs2/dlmglue.c | 3
-rw-r--r--  fs/ocfs2/file.c | 268
-rw-r--r--  fs/ocfs2/inode.c | 113
-rw-r--r--  fs/ocfs2/inode.h | 4
-rw-r--r--  fs/ocfs2/journal.c | 26
-rw-r--r--  fs/ocfs2/journal.h | 15
-rw-r--r--  fs/ocfs2/localalloc.c | 275
-rw-r--r--  fs/ocfs2/localalloc.h | 3
-rw-r--r--  fs/ocfs2/mmap.c | 48
-rw-r--r--  fs/ocfs2/namei.c | 149
-rw-r--r--  fs/ocfs2/ocfs2.h | 22
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 144
-rw-r--r--  fs/ocfs2/quota.h | 12
-rw-r--r--  fs/ocfs2/quota_global.c | 351
-rw-r--r--  fs/ocfs2/quota_local.c | 183
-rw-r--r--  fs/ocfs2/refcounttree.c | 77
-rw-r--r--  fs/ocfs2/refcounttree.h | 4
-rw-r--r--  fs/ocfs2/reservations.c | 847
-rw-r--r--  fs/ocfs2/reservations.h | 159
-rw-r--r--  fs/ocfs2/resize.c | 19
-rw-r--r--  fs/ocfs2/suballoc.c | 688
-rw-r--r--  fs/ocfs2/suballoc.h | 21
-rw-r--r--  fs/ocfs2/super.c | 92
-rw-r--r--  fs/ocfs2/super.h | 7
-rw-r--r--  fs/ocfs2/xattr.c | 103
-rw-r--r--  fs/omfs/inode.c | 1
-rw-r--r--  fs/proc/array.c | 3
-rw-r--r--  fs/proc/base.c | 10
-rw-r--r--  fs/proc/inode.c | 4
-rw-r--r--  fs/proc/kcore.c | 1
-rw-r--r--  fs/proc/kmsg.c | 1
-rw-r--r--  fs/proc/task_mmu.c | 19
-rw-r--r--  fs/proc/vmcore.c | 1
-rw-r--r--  fs/quota/dquot.c | 275
-rw-r--r--  fs/quota/quota.c | 63
-rw-r--r--  fs/quota/quota_tree.c | 50
-rw-r--r--  fs/quota/quota_tree.h | 6
-rw-r--r--  fs/quota/quota_v1.c | 4
-rw-r--r--  fs/quota/quota_v2.c | 6
-rw-r--r--  fs/ramfs/inode.c | 2
-rw-r--r--  fs/reiserfs/inode.c | 3
-rw-r--r--  fs/sysfs/bin.c | 26
-rw-r--r--  fs/sysfs/dir.c | 114
-rw-r--r--  fs/sysfs/file.c | 17
-rw-r--r--  fs/sysfs/group.c | 6
-rw-r--r--  fs/sysfs/inode.c | 6
-rw-r--r--  fs/sysfs/mount.c | 95
-rw-r--r--  fs/sysfs/symlink.c | 36
-rw-r--r--  fs/sysfs/sysfs.h | 34
-rw-r--r--  fs/sysv/dir.c | 2
-rw-r--r--  fs/timerfd.c | 25
-rw-r--r--  fs/ubifs/io.c | 1
-rw-r--r--  fs/udf/dir.c | 2
-rw-r--r--  fs/udf/file.c | 45
-rw-r--r--  fs/udf/udfdecl.h | 3
-rw-r--r--  fs/ufs/inode.c | 2
-rw-r--r--  fs/ufs/namei.c | 2
-rw-r--r--  fs/ufs/symlink.c | 8
-rw-r--r--  fs/ufs/truncate.c | 10
-rw-r--r--  fs/ufs/ufs.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 231
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 27
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_quotaops.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 15
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 203
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h | 150
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 193
-rw-r--r--  fs/xfs/quota/xfs_dquot.h | 35
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 30
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 609
-rw-r--r--  fs/xfs/quota/xfs_qm.h | 23
-rw-r--r--  fs/xfs/quota/xfs_qm_stats.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 165
-rw-r--r--  fs/xfs/quota/xfs_quota_priv.h | 102
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 29
-rw-r--r--  fs/xfs/xfs_ag.h | 1
-rw-r--r--  fs/xfs/xfs_bmap.c | 2
-rw-r--r--  fs/xfs/xfs_buf_item.c | 55
-rw-r--r--  fs/xfs/xfs_buf_item.h | 2
-rw-r--r--  fs/xfs/xfs_error.c | 30
-rw-r--r--  fs/xfs/xfs_error.h | 9
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 18
-rw-r--r--  fs/xfs/xfs_inode.c | 2
-rw-r--r--  fs/xfs/xfs_inode_item.c | 21
-rw-r--r--  fs/xfs/xfs_iomap.c | 123
-rw-r--r--  fs/xfs/xfs_iomap.h | 47
-rw-r--r--  fs/xfs/xfs_log.c | 702
-rw-r--r--  fs/xfs/xfs_log.h | 13
-rw-r--r--  fs/xfs/xfs_log_priv.h | 12
-rw-r--r--  fs/xfs/xfs_log_recover.c | 311
-rw-r--r--  fs/xfs/xfs_mount.c | 7
-rw-r--r--  fs/xfs/xfs_mount.h | 1
-rw-r--r--  fs/xfs/xfs_quota.h | 3
-rw-r--r--  fs/xfs/xfs_trans.c | 760
-rw-r--r--  fs/xfs/xfs_trans.h | 14
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 187
295 files changed, 9784 insertions, 7248 deletions
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 109a6c6..e8e5e63 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -177,8 +177,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
}
/* Trigger mount for path component or follow link */
} else if (ino->flags & AUTOFS_INF_PENDING ||
- autofs4_need_mount(flags) ||
- current->link_count) {
+ autofs4_need_mount(flags)) {
DPRINTK("waiting for mount name=%.*s",
dentry->d_name.len, dentry->d_name.name);
@@ -262,7 +261,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
spin_unlock(&dcache_lock);
spin_unlock(&sbi->fs_lock);
- status = try_to_fill_dentry(dentry, 0);
+ status = try_to_fill_dentry(dentry, nd->flags);
if (status)
goto out_error;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index e84ef60..97a9783 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1481,12 +1481,17 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
ret = -EBADF;
goto out_drop_write;
}
+
src = src_file->f_dentry->d_inode;
ret = -EINVAL;
if (src == inode)
goto out_fput;
+ /* the src must be open for reading */
+ if (!(src_file->f_mode & FMODE_READ))
+ goto out_fput;
+
ret = -EISDIR;
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
goto out_fput;
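The checks added above follow a common ioctl-validation shape: resolve the donor descriptor, then refuse to proceed unless that file was actually opened readable, before the directory checks run. A minimal sketch of that ordering, assuming the 2.6.3x struct file layout used elsewhere in this diff; the helper name and the choice of -EINVAL for the mode failure are illustrative, not the btrfs code itself:

#include <linux/fs.h>

/* hypothetical helper mirroring the validation order in the hunk above */
static int my_clone_check(struct file *dst_file, struct file *src_file)
{
        struct inode *src = src_file->f_dentry->d_inode;
        struct inode *dst = dst_file->f_dentry->d_inode;

        if (src == dst)
                return -EINVAL;         /* cannot clone a file onto itself */
        if (!(src_file->f_mode & FMODE_READ))
                return -EINVAL;         /* donor must be open for reading */
        if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
                return -EISDIR;         /* only regular files are clonable */
        return 0;
}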
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index f7c255f..a8cd821 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -34,6 +34,7 @@ struct cachefiles_object {
loff_t i_size; /* object size */
unsigned long flags;
#define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */
+#define CACHEFILES_OBJECT_BURIED 1 /* T if preemptively buried */
atomic_t usage; /* object usage count */
uint8_t type; /* object type */
uint8_t new; /* T if object new */
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index d5db84a1e..f4a7840 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -93,6 +93,59 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object,
}
/*
+ * mark the owner of a dentry, if there is one, to indicate that that dentry
+ * has been preemptively deleted
+ * - the caller must hold the i_mutex on the dentry's parent as required to
+ * call vfs_unlink(), vfs_rmdir() or vfs_rename()
+ */
+static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
+ struct dentry *dentry)
+{
+ struct cachefiles_object *object;
+ struct rb_node *p;
+
+ _enter(",'%*.*s'",
+ dentry->d_name.len, dentry->d_name.len, dentry->d_name.name);
+
+ write_lock(&cache->active_lock);
+
+ p = cache->active_nodes.rb_node;
+ while (p) {
+ object = rb_entry(p, struct cachefiles_object, active_node);
+ if (object->dentry > dentry)
+ p = p->rb_left;
+ else if (object->dentry < dentry)
+ p = p->rb_right;
+ else
+ goto found_dentry;
+ }
+
+ write_unlock(&cache->active_lock);
+ _leave(" [no owner]");
+ return;
+
+ /* found the dentry for */
+found_dentry:
+ kdebug("preemptive burial: OBJ%x [%s] %p",
+ object->fscache.debug_id,
+ fscache_object_states[object->fscache.state],
+ dentry);
+
+ if (object->fscache.state < FSCACHE_OBJECT_DYING) {
+ printk(KERN_ERR "\n");
+ printk(KERN_ERR "CacheFiles: Error:"
+ " Can't preemptively bury live object\n");
+ cachefiles_printk_object(object, NULL);
+ } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
+ printk(KERN_ERR "CacheFiles: Error:"
+ " Object already preemptively buried\n");
+ }
+
+ write_unlock(&cache->active_lock);
+ _leave(" [owner marked]");
+}
+
+/*
* record the fact that an object is now active
*/
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
@@ -219,7 +272,8 @@ requeue:
*/
static int cachefiles_bury_object(struct cachefiles_cache *cache,
struct dentry *dir,
- struct dentry *rep)
+ struct dentry *rep,
+ bool preemptive)
{
struct dentry *grave, *trap;
char nbuffer[8 + 8 + 1];
@@ -229,11 +283,16 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
dir->d_name.len, dir->d_name.len, dir->d_name.name,
rep->d_name.len, rep->d_name.len, rep->d_name.name);
+ _debug("remove %p from %p", rep, dir);
+
/* non-directories can just be unlinked */
if (!S_ISDIR(rep->d_inode->i_mode)) {
_debug("unlink stale object");
ret = vfs_unlink(dir->d_inode, rep);
+ if (preemptive)
+ cachefiles_mark_object_buried(cache, rep);
+
mutex_unlock(&dir->d_inode->i_mutex);
if (ret == -EIO)
@@ -325,6 +384,9 @@ try_again:
if (ret != 0 && ret != -ENOMEM)
cachefiles_io_error(cache, "Rename failed with error %d", ret);
+ if (preemptive)
+ cachefiles_mark_object_buried(cache, rep);
+
unlock_rename(cache->graveyard, dir);
dput(grave);
_leave(" = 0");
@@ -340,7 +402,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
struct dentry *dir;
int ret;
- _enter(",{%p}", object->dentry);
+ _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);
ASSERT(object->dentry);
ASSERT(object->dentry->d_inode);
@@ -350,15 +412,25 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- /* we need to check that our parent is _still_ our parent - it may have
- * been renamed */
- if (dir == object->dentry->d_parent) {
- ret = cachefiles_bury_object(cache, dir, object->dentry);
- } else {
- /* it got moved, presumably by cachefilesd culling it, so it's
- * no longer in the key path and we can ignore it */
+ if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
+ /* object allocation for the same key preemptively deleted this
+ * object's file so that it could create its own file */
+ _debug("object preemptively buried");
mutex_unlock(&dir->d_inode->i_mutex);
ret = 0;
+ } else {
+ /* we need to check that our parent is _still_ our parent - it
+ * may have been renamed */
+ if (dir == object->dentry->d_parent) {
+ ret = cachefiles_bury_object(cache, dir,
+ object->dentry, false);
+ } else {
+ /* it got moved, presumably by cachefilesd culling it,
+ * so it's no longer in the key path and we can ignore
+ * it */
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = 0;
+ }
}
dput(dir);
@@ -381,7 +453,9 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
const char *name;
int ret, nlen;
- _enter("{%p},,%s,", parent->dentry, key);
+ _enter("OBJ%x{%p},OBJ%x,%s,",
+ parent->fscache.debug_id, parent->dentry,
+ object->fscache.debug_id, key);
cache = container_of(parent->fscache.cache,
struct cachefiles_cache, cache);
@@ -509,7 +583,7 @@ lookup_again:
* mutex) */
object->dentry = NULL;
- ret = cachefiles_bury_object(cache, dir, next);
+ ret = cachefiles_bury_object(cache, dir, next, true);
dput(next);
next = NULL;
@@ -828,7 +902,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
/* actually remove the victim (drops the dir mutex) */
_debug("bury");
- ret = cachefiles_bury_object(cache, dir, victim);
+ ret = cachefiles_bury_object(cache, dir, victim, false);
if (ret < 0)
goto error;
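The new cachefiles_mark_object_buried() above finds the owning object by walking cache->active_nodes with raw pointer comparisons on the dentry, the same ordering the insertion path uses. A sketch of that pointer-keyed rbtree lookup, with hypothetical node and key types:

#include <linux/rbtree.h>

struct my_node {
        struct rb_node  rb;
        void            *key;   /* e.g. a dentry pointer */
};

/* binary search ordered purely by pointer value */
static struct my_node *my_tree_find(struct rb_root *root, void *key)
{
        struct rb_node *p = root->rb_node;

        while (p) {
                struct my_node *n = rb_entry(p, struct my_node, rb);

                if (n->key > key)
                        p = p->rb_left;
                else if (n->key < key)
                        p = p->rb_right;
                else
                        return n;       /* exact pointer match */
        }
        return NULL;
}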
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
index b5808cd..039b501 100644
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -77,6 +77,8 @@ static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
/*
* check the security details of the on-disk cache
* - must be called with security override in force
+ * - must return with a security override in force - even in the case of an
+ * error
*/
int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
struct dentry *root,
@@ -99,6 +101,8 @@ int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
* which create files */
ret = set_create_files_as(new, root->d_inode);
if (ret < 0) {
+ abort_creds(new);
+ cachefiles_begin_secure(cache, _saved_cred);
_leave(" = %d [cfa]", ret);
return ret;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4125937..a9005d8 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -504,12 +504,11 @@ static void writepages_finish(struct ceph_osd_request *req,
int i;
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
- struct writeback_control *wbc = req->r_wbc;
__s32 rc = -EIO;
u64 bytes = 0;
struct ceph_client *client = ceph_inode_to_client(inode);
long writeback_stat;
- unsigned issued = __ceph_caps_issued(ci, NULL);
+ unsigned issued = ceph_caps_issued(ci);
/* parse reply */
replyhead = msg->front.iov_base;
@@ -546,10 +545,6 @@ static void writepages_finish(struct ceph_osd_request *req,
clear_bdi_congested(&client->backing_dev_info,
BLK_RW_ASYNC);
- if (i >= wrote) {
- dout("inode %p skipping page %p\n", inode, page);
- wbc->pages_skipped++;
- }
ceph_put_snap_context((void *)page->private);
page->private = 0;
ClearPagePrivate(page);
@@ -799,7 +794,6 @@ get_more_pages:
alloc_page_vec(client, req);
req->r_callback = writepages_finish;
req->r_inode = inode;
- req->r_wbc = wbc;
}
/* note position of first page in pvec */
diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c
index f6394b9..818afe7 100644
--- a/fs/ceph/auth.c
+++ b/fs/ceph/auth.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include "types.h"
#include "auth_none.h"
diff --git a/fs/ceph/auth_none.h b/fs/ceph/auth_none.h
index 56c0553..8164df1 100644
--- a/fs/ceph/auth_none.h
+++ b/fs/ceph/auth_none.h
@@ -1,6 +1,8 @@
#ifndef _FS_CEPH_AUTH_NONE_H
#define _FS_CEPH_AUTH_NONE_H
+#include <linux/slab.h>
+
#include "auth.h"
/*
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index d9001a4..fee5a08d 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -12,8 +12,6 @@
#include "auth.h"
#include "decode.h"
-struct kmem_cache *ceph_x_ticketbuf_cachep;
-
#define TEMP_TICKET_BUF_LEN 256
static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
@@ -131,13 +129,12 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
char *ticket_buf;
u8 struct_v;
- dbuf = kmem_cache_alloc(ceph_x_ticketbuf_cachep, GFP_NOFS | GFP_ATOMIC);
+ dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
if (!dbuf)
return -ENOMEM;
ret = -ENOMEM;
- ticket_buf = kmem_cache_alloc(ceph_x_ticketbuf_cachep,
- GFP_NOFS | GFP_ATOMIC);
+ ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
if (!ticket_buf)
goto out_dbuf;
@@ -251,9 +248,9 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
ret = 0;
out:
- kmem_cache_free(ceph_x_ticketbuf_cachep, ticket_buf);
+ kfree(ticket_buf);
out_dbuf:
- kmem_cache_free(ceph_x_ticketbuf_cachep, dbuf);
+ kfree(dbuf);
return ret;
bad:
@@ -605,8 +602,6 @@ static void ceph_x_destroy(struct ceph_auth_client *ac)
remove_ticket_handler(ac, th);
}
- kmem_cache_destroy(ceph_x_ticketbuf_cachep);
-
kfree(ac->private);
ac->private = NULL;
}
@@ -641,26 +636,20 @@ int ceph_x_init(struct ceph_auth_client *ac)
int ret;
dout("ceph_x_init %p\n", ac);
+ ret = -ENOMEM;
xi = kzalloc(sizeof(*xi), GFP_NOFS);
if (!xi)
- return -ENOMEM;
+ goto out;
- ret = -ENOMEM;
- ceph_x_ticketbuf_cachep = kmem_cache_create("ceph_x_ticketbuf",
- TEMP_TICKET_BUF_LEN, 8,
- (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
- NULL);
- if (!ceph_x_ticketbuf_cachep)
- goto done_nomem;
ret = -EINVAL;
if (!ac->secret) {
pr_err("no secret set (for auth_x protocol)\n");
- goto done_nomem;
+ goto out_nomem;
}
ret = ceph_crypto_key_unarmor(&xi->secret, ac->secret);
if (ret)
- goto done_nomem;
+ goto out_nomem;
xi->starting = true;
xi->ticket_handlers = RB_ROOT;
@@ -670,10 +659,9 @@ int ceph_x_init(struct ceph_auth_client *ac)
ac->ops = &ceph_x_ops;
return 0;
-done_nomem:
+out_nomem:
kfree(xi);
- if (ceph_x_ticketbuf_cachep)
- kmem_cache_destroy(ceph_x_ticketbuf_cachep);
+out:
return ret;
}
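The conversion above drops a module-global kmem_cache, whose create/destroy lifetime was itself part of the init-path problem, in favour of plain kmalloc/kfree for two short-lived 256-byte buffers. A sketch of the resulting allocate-and-unwind shape; the function name is hypothetical and MY_BUF_LEN stands in for TEMP_TICKET_BUF_LEN:

#include <linux/slab.h>

#define MY_BUF_LEN 256

static int my_proc_reply(void)
{
        char *dbuf, *ticket_buf;
        int ret = -ENOMEM;

        dbuf = kmalloc(MY_BUF_LEN, GFP_NOFS);
        if (!dbuf)
                return -ENOMEM;
        ticket_buf = kmalloc(MY_BUF_LEN, GFP_NOFS);
        if (!ticket_buf)
                goto out_dbuf;

        /* ... decode into dbuf/ticket_buf ... */
        ret = 0;

        kfree(ticket_buf);
out_dbuf:
        kfree(dbuf);
        return ret;
}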
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index aa2239f..d940053 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -858,6 +858,8 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
}
/*
+ * Remove a cap. Take steps to deal with a racing iterate_session_caps.
+ *
* caller should hold i_lock.
* caller will not hold session s_mutex if called from destroy_inode.
*/
@@ -866,15 +868,10 @@ void __ceph_remove_cap(struct ceph_cap *cap)
struct ceph_mds_session *session = cap->session;
struct ceph_inode_info *ci = cap->ci;
struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+ int removed = 0;
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
- /* remove from inode list */
- rb_erase(&cap->ci_node, &ci->i_caps);
- cap->ci = NULL;
- if (ci->i_auth_cap == cap)
- ci->i_auth_cap = NULL;
-
/* remove from session list */
spin_lock(&session->s_cap_lock);
if (session->s_cap_iterator == cap) {
@@ -885,10 +882,18 @@ void __ceph_remove_cap(struct ceph_cap *cap)
list_del_init(&cap->session_caps);
session->s_nr_caps--;
cap->session = NULL;
+ removed = 1;
}
+ /* protect backpointer with s_cap_lock: see iterate_session_caps */
+ cap->ci = NULL;
spin_unlock(&session->s_cap_lock);
- if (cap->session == NULL)
+ /* remove from inode list */
+ rb_erase(&cap->ci_node, &ci->i_caps);
+ if (ci->i_auth_cap == cap)
+ ci->i_auth_cap = NULL;
+
+ if (removed)
ceph_put_cap(cap);
if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
@@ -1861,8 +1866,8 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
} else {
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
- spin_unlock(&inode->i_lock);
}
+ spin_unlock(&inode->i_lock);
}
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ea8ee2e..650d2db 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -880,7 +880,16 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
* do_request, above). If there is no trace, we need
* to do it here.
*/
+
+ /* d_move screws up d_subdirs order */
+ ceph_i_clear(new_dir, CEPH_I_COMPLETE);
+
d_move(old_dentry, new_dentry);
+
+ /* ensure target dentry is invalidated, despite
+ rehashing bug in vfs_rename_dir */
+ new_dentry->d_time = jiffies;
+ ceph_dentry(new_dentry)->lease_shared_gen = 0;
}
ceph_mdsc_put_request(req);
return err;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 4add3d5..ed6f197 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -665,7 +665,8 @@ more:
* throw out any page cache pages in this range. this
* may block.
*/
- truncate_inode_pages_range(inode->i_mapping, pos, pos+len);
+ truncate_inode_pages_range(inode->i_mapping, pos,
+ (pos+len) | (PAGE_CACHE_SIZE-1));
} else {
pages = alloc_page_vector(num_pages);
if (IS_ERR(pages)) {
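The one-line fix above matters because truncate_inode_pages_range() takes the offset of the last byte to discard, not one past it; OR-ing in the page mask rounds the end offset up to the final byte of its page. A sketch assuming a 4 KiB page size (PAGE_CACHE_SIZE in the hunk):

#define MY_PAGE_SIZE 4096UL     /* assumption for illustration */

/* last byte of the page containing 'off' (page size is a power of two) */
static unsigned long my_page_end(unsigned long off)
{
        return off | (MY_PAGE_SIZE - 1);
}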
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 26f883c..85b4d2f 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -733,6 +733,10 @@ no_change:
__ceph_get_fmode(ci, cap_fmode);
spin_unlock(&inode->i_lock);
}
+ } else if (cap_fmode >= 0) {
+ pr_warning("mds issued no caps on %llx.%llx\n",
+ ceph_vinop(inode));
+ __ceph_get_fmode(ci, cap_fmode);
}
/* update delegation info? */
@@ -997,6 +1001,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
dn, dn->d_name.len, dn->d_name.name);
dout("fill_trace doing d_move %p -> %p\n",
req->r_old_dentry, dn);
+
+ /* d_move screws up d_subdirs order */
+ ceph_i_clear(dir, CEPH_I_COMPLETE);
+
d_move(req->r_old_dentry, dn);
dout(" src %p '%.*s' dst %p '%.*s'\n",
req->r_old_dentry,
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 60a9a4a..24561a5 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -736,9 +736,10 @@ static void cleanup_cap_releases(struct ceph_mds_session *session)
}
/*
- * Helper to safely iterate over all caps associated with a session.
+ * Helper to safely iterate over all caps associated with a session, with
+ * special care taken to handle a racing __ceph_remove_cap().
*
- * caller must hold session s_mutex
+ * Caller must hold session s_mutex.
*/
static int iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, struct ceph_cap *,
@@ -2136,7 +2137,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
struct ceph_mds_session *session = NULL;
struct ceph_msg *reply;
struct rb_node *p;
- int err;
+ int err = -ENOMEM;
struct ceph_pagelist *pagelist;
pr_info("reconnect to recovering mds%d\n", mds);
@@ -2185,7 +2186,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
goto fail;
err = iterate_session_caps(session, encode_caps_cb, pagelist);
if (err < 0)
- goto out;
+ goto fail;
/*
* snaprealms. we provide mds with the ino, seq (version), and
@@ -2213,28 +2214,31 @@ send:
reply->nr_pages = calc_pages_for(0, pagelist->length);
ceph_con_send(&session->s_con, reply);
- if (session) {
- session->s_state = CEPH_MDS_SESSION_OPEN;
- __wake_requests(mdsc, &session->s_waiting);
- }
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+ mutex_unlock(&session->s_mutex);
+
+ mutex_lock(&mdsc->mutex);
+ __wake_requests(mdsc, &session->s_waiting);
+ mutex_unlock(&mdsc->mutex);
+
+ ceph_put_mds_session(session);
-out:
up_read(&mdsc->snap_rwsem);
- if (session) {
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- }
mutex_lock(&mdsc->mutex);
return;
fail:
ceph_msg_put(reply);
+ up_read(&mdsc->snap_rwsem);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
fail_nomsg:
ceph_pagelist_release(pagelist);
kfree(pagelist);
fail_nopagelist:
- pr_err("ENOMEM preparing reconnect for mds%d\n", mds);
- goto out;
+ pr_err("error %d preparing reconnect for mds%d\n", err, mds);
+ mutex_lock(&mdsc->mutex);
+ return;
}
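The reworked exit paths above replace one shared "out" label, which could miss releases depending on where the failure happened, with staged labels that drop the session mutex, snap_rwsem and the session reference on every path and retake mdsc->mutex before returning. A generic sketch of that staged-unwind idiom, all names hypothetical:

#include <linux/slab.h>

static int my_setup(void)
{
        void *a, *b;
        int err = -ENOMEM;

        a = kmalloc(64, GFP_KERNEL);
        if (!a)
                goto fail_noa;
        b = kmalloc(64, GFP_KERNEL);
        if (!b)
                goto fail_nob;

        err = 0;                /* real work would set err on failure */
        if (err < 0)
                goto fail;

        kfree(b);
        kfree(a);
        return 0;

fail:
        kfree(b);               /* each label undoes one acquisition... */
fail_nob:
        kfree(a);               /* ...in reverse order */
fail_noa:
        return err;
}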
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index cdaaa13..cd4fadb 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -492,7 +492,14 @@ static void prepare_write_message(struct ceph_connection *con)
list_move_tail(&m->list_head, &con->out_sent);
}
- m->hdr.seq = cpu_to_le64(++con->out_seq);
+ /*
+ * only assign outgoing seq # if we haven't sent this message
+ * yet. if it is requeued, resend with its original seq.
+ */
+ if (m->needs_out_seq) {
+ m->hdr.seq = cpu_to_le64(++con->out_seq);
+ m->needs_out_seq = false;
+ }
dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
m, con->out_seq, le16_to_cpu(m->hdr.type),
@@ -1334,6 +1341,7 @@ static int read_partial_message(struct ceph_connection *con)
unsigned front_len, middle_len, data_len, data_off;
int datacrc = con->msgr->nocrc;
int skip;
+ u64 seq;
dout("read_partial_message con %p msg %p\n", con, m);
@@ -1368,6 +1376,25 @@ static int read_partial_message(struct ceph_connection *con)
return -EIO;
data_off = le16_to_cpu(con->in_hdr.data_off);
+ /* verify seq# */
+ seq = le64_to_cpu(con->in_hdr.seq);
+ if ((s64)seq - (s64)con->in_seq < 1) {
+ pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
+ ENTITY_NAME(con->peer_name),
+ pr_addr(&con->peer_addr.in_addr),
+ seq, con->in_seq + 1);
+ con->in_base_pos = -front_len - middle_len - data_len -
+ sizeof(m->footer);
+ con->in_tag = CEPH_MSGR_TAG_READY;
+ con->in_seq++;
+ return 0;
+ } else if ((s64)seq - (s64)con->in_seq > 1) {
+ pr_err("read_partial_message bad seq %lld expected %lld\n",
+ seq, con->in_seq + 1);
+ con->error_msg = "bad message sequence # for incoming message";
+ return -EBADMSG;
+ }
+
/* allocate message? */
if (!con->in_msg) {
dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
@@ -1379,6 +1406,7 @@ static int read_partial_message(struct ceph_connection *con)
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
+ con->in_seq++;
return 0;
}
if (IS_ERR(con->in_msg)) {
@@ -1965,6 +1993,8 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
+ msg->needs_out_seq = true;
+
/* queue */
mutex_lock(&con->mutex);
BUG_ON(!list_empty(&msg->list_head));
@@ -2030,6 +2060,7 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
con->in_tag = CEPH_MSGR_TAG_READY;
+ con->in_seq++;
} else {
dout("con_revoke_pages %p msg %p pages %p no-op\n",
con, con->in_msg, msg);
@@ -2063,15 +2094,19 @@ struct ceph_msg *ceph_msg_new(int type, int front_len,
kref_init(&m->kref);
INIT_LIST_HEAD(&m->list_head);
+ m->hdr.tid = 0;
m->hdr.type = cpu_to_le16(type);
+ m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
+ m->hdr.version = 0;
m->hdr.front_len = cpu_to_le32(front_len);
m->hdr.middle_len = 0;
m->hdr.data_len = cpu_to_le32(page_len);
m->hdr.data_off = cpu_to_le16(page_off);
- m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
+ m->hdr.reserved = 0;
m->footer.front_crc = 0;
m->footer.middle_crc = 0;
m->footer.data_crc = 0;
+ m->footer.flags = 0;
m->front_max = front_len;
m->front_is_vmalloc = false;
m->more_to_follow = false;
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
index a343dae..a5caf91 100644
--- a/fs/ceph/messenger.h
+++ b/fs/ceph/messenger.h
@@ -86,6 +86,7 @@ struct ceph_msg {
struct kref kref;
bool front_is_vmalloc;
bool more_to_follow;
+ bool needs_out_seq;
int front_max;
struct ceph_msgpool *pool;
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index c7b4ded..3514f71 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -565,7 +565,8 @@ static int __map_osds(struct ceph_osd_client *osdc,
{
struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
struct ceph_pg pgid;
- int o = -1;
+ int acting[CEPH_PG_MAX_SIZE];
+ int o = -1, num = 0;
int err;
dout("map_osds %p tid %lld\n", req, req->r_tid);
@@ -576,10 +577,16 @@ static int __map_osds(struct ceph_osd_client *osdc,
pgid = reqhead->layout.ol_pgid;
req->r_pgid = pgid;
- o = ceph_calc_pg_primary(osdc->osdmap, pgid);
+ err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
+ if (err > 0) {
+ o = acting[0];
+ num = err;
+ }
if ((req->r_osd && req->r_osd->o_osd == o &&
- req->r_sent >= req->r_osd->o_incarnation) ||
+ req->r_sent >= req->r_osd->o_incarnation &&
+ req->r_num_pg_osds == num &&
+ memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
(req->r_osd == NULL && o == -1))
return 0; /* no change */
@@ -587,6 +594,10 @@ static int __map_osds(struct ceph_osd_client *osdc,
req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
req->r_osd ? req->r_osd->o_osd : -1);
+ /* record full pg acting set */
+ memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
+ req->r_num_pg_osds = num;
+
if (req->r_osd) {
__cancel_request(req);
list_del_init(&req->r_osd_item);
@@ -612,7 +623,7 @@ static int __map_osds(struct ceph_osd_client *osdc,
__remove_osd_from_lru(req->r_osd);
list_add(&req->r_osd_item, &req->r_osd->o_requests);
}
- err = 1; /* osd changed */
+ err = 1; /* osd or pg changed */
out:
return err;
@@ -779,16 +790,18 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
struct ceph_osd_request *req;
u64 tid;
int numops, object_len, flags;
+ s32 result;
tid = le64_to_cpu(msg->hdr.tid);
if (msg->front.iov_len < sizeof(*rhead))
goto bad;
numops = le32_to_cpu(rhead->num_ops);
object_len = le32_to_cpu(rhead->object_len);
+ result = le32_to_cpu(rhead->result);
if (msg->front.iov_len != sizeof(*rhead) + object_len +
numops * sizeof(struct ceph_osd_op))
goto bad;
- dout("handle_reply %p tid %llu\n", msg, tid);
+ dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
/* lookup */
mutex_lock(&osdc->request_mutex);
@@ -834,7 +847,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
dout("handle_reply tid %llu flags %d\n", tid, flags);
/* either this is a read, or we got the safe response */
- if ((flags & CEPH_OSD_FLAG_ONDISK) ||
+ if (result < 0 ||
+ (flags & CEPH_OSD_FLAG_ONDISK) ||
((flags & CEPH_OSD_FLAG_WRITE) == 0))
__unregister_request(osdc, req);
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h
index b075991..ce77698 100644
--- a/fs/ceph/osd_client.h
+++ b/fs/ceph/osd_client.h
@@ -48,6 +48,8 @@ struct ceph_osd_request {
struct list_head r_osd_item;
struct ceph_osd *r_osd;
struct ceph_pg r_pgid;
+ int r_pg_osds[CEPH_PG_MAX_SIZE];
+ int r_num_pg_osds;
struct ceph_connection *r_con_filling_msg;
@@ -66,7 +68,6 @@ struct ceph_osd_request {
struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
- struct writeback_control *r_wbc; /* ditto */
char r_oid[40]; /* object name */
int r_oid_len;
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 2e2c15e..cfdd8f4 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -1041,12 +1041,33 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
}
/*
+ * Return acting set for given pgid.
+ */
+int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
+ int *acting)
+{
+ int rawosds[CEPH_PG_MAX_SIZE], *osds;
+ int i, o, num = CEPH_PG_MAX_SIZE;
+
+ osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
+ if (!osds)
+ return -1;
+
+ /* primary is first up osd */
+ o = 0;
+ for (i = 0; i < num; i++)
+ if (ceph_osd_is_up(osdmap, osds[i]))
+ acting[o++] = osds[i];
+ return o;
+}
+
+/*
* Return primary osd for given pgid, or -1 if none.
*/
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
- int rawosds[10], *osds;
- int i, num = ARRAY_SIZE(rawosds);
+ int rawosds[CEPH_PG_MAX_SIZE], *osds;
+ int i, num = CEPH_PG_MAX_SIZE;
osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
if (!osds)
@@ -1054,9 +1075,7 @@ int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
/* primary is first up osd */
for (i = 0; i < num; i++)
- if (ceph_osd_is_up(osdmap, osds[i])) {
+ if (ceph_osd_is_up(osdmap, osds[i]))
return osds[i];
- break;
- }
return -1;
}
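ceph_calc_pg_acting() above feeds the resend decision in __map_osds(): a request is left where it is only when the freshly computed acting set is identical to the copy cached on the request. A sketch of that comparison with hypothetical types; MY_PG_MAX stands in for CEPH_PG_MAX_SIZE:

#include <linux/string.h>
#include <linux/types.h>

#define MY_PG_MAX 16

struct my_req {
        int pg_osds[MY_PG_MAX];
        int num_pg_osds;
};

/* true if the cached mapping matches the newly computed acting set */
static bool my_mapping_unchanged(const struct my_req *req,
                                 const int *acting, int num)
{
        return req->num_pg_osds == num &&
               memcmp(req->pg_osds, acting, sizeof(acting[0]) * num) == 0;
}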
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
index 8bc9f1e..970b547 100644
--- a/fs/ceph/osdmap.h
+++ b/fs/ceph/osdmap.h
@@ -120,6 +120,8 @@ extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
const char *oid,
struct ceph_file_layout *fl,
struct ceph_osdmap *osdmap);
+extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
+ int *acting);
extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
struct ceph_pg pgid);
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
index a1fc1d0..fd56451 100644
--- a/fs/ceph/rados.h
+++ b/fs/ceph/rados.h
@@ -58,6 +58,7 @@ struct ceph_timespec {
#define CEPH_PG_LAYOUT_LINEAR 2
#define CEPH_PG_LAYOUT_HYBRID 3
+#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */
/*
* placement group.
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 2b88126..d5114db 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -869,16 +869,20 @@ skip_inode:
continue;
ci = ceph_inode(inode);
spin_lock(&inode->i_lock);
- if (!ci->i_snap_realm)
- goto split_skip_inode;
- ceph_put_snap_realm(mdsc, ci->i_snap_realm);
- spin_lock(&realm->inodes_with_caps_lock);
- list_add(&ci->i_snap_realm_item,
- &realm->inodes_with_caps);
- ci->i_snap_realm = realm;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_get_snap_realm(mdsc, realm);
-split_skip_inode:
+ if (list_empty(&ci->i_snap_realm_item)) {
+ struct ceph_snap_realm *oldrealm =
+ ci->i_snap_realm;
+
+ dout(" moving %p to split realm %llx %p\n",
+ inode, realm->ino, realm);
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_add(&ci->i_snap_realm_item,
+ &realm->inodes_with_caps);
+ ci->i_snap_realm = realm;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ ceph_get_snap_realm(mdsc, realm);
+ ceph_put_snap_realm(mdsc, oldrealm);
+ }
spin_unlock(&inode->i_lock);
iput(inode);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 75d02ea..110857b 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -47,10 +47,20 @@ const char *ceph_file_part(const char *s, int len)
*/
static void ceph_put_super(struct super_block *s)
{
- struct ceph_client *cl = ceph_client(s);
+ struct ceph_client *client = ceph_sb_to_client(s);
dout("put_super\n");
- ceph_mdsc_close_sessions(&cl->mdsc);
+ ceph_mdsc_close_sessions(&client->mdsc);
+
+ /*
+ * ensure we release the bdi before put_anon_super releases
+ * the device name.
+ */
+ if (s->s_bdi == &client->backing_dev_info) {
+ bdi_unregister(&client->backing_dev_info);
+ s->s_bdi = NULL;
+ }
+
return;
}
@@ -636,6 +646,8 @@ static void ceph_destroy_client(struct ceph_client *client)
destroy_workqueue(client->pg_inv_wq);
destroy_workqueue(client->trunc_wq);
+ bdi_destroy(&client->backing_dev_info);
+
if (client->msgr)
ceph_messenger_destroy(client->msgr);
mempool_destroy(client->wb_pagevec_pool);
@@ -876,14 +888,14 @@ static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
{
int err;
- sb->s_bdi = &client->backing_dev_info;
-
/* set ra_pages based on rsize mount option? */
if (client->mount_args->rsize >= PAGE_CACHE_SIZE)
client->backing_dev_info.ra_pages =
(client->mount_args->rsize + PAGE_CACHE_SIZE - 1)
>> PAGE_SHIFT;
err = bdi_register_dev(&client->backing_dev_info, sb->s_dev);
+ if (!err)
+ sb->s_bdi = &client->backing_dev_info;
return err;
}
@@ -957,9 +969,6 @@ static void ceph_kill_sb(struct super_block *s)
dout("kill_sb %p\n", s);
ceph_mdsc_pre_umount(&client->mdsc);
kill_anon_super(s); /* will call put_super after sb is r/o */
- if (s->s_bdi == &client->backing_dev_info)
- bdi_unregister(&client->backing_dev_info);
- bdi_destroy(&client->backing_dev_info);
ceph_destroy_client(client);
}
@@ -996,9 +1005,10 @@ static int __init init_ceph(void)
if (ret)
goto out_icache;
- pr_info("loaded %d.%d.%d (mon/mds/osd proto %d/%d/%d)\n",
- CEPH_VERSION_MAJOR, CEPH_VERSION_MINOR, CEPH_VERSION_PATCH,
- CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL);
+ pr_info("loaded (mon/mds/osd proto %d/%d/%d, osdmap %d/%d %d/%d)\n",
+ CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL,
+ CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
+ CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
return 0;
out_icache:
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index e30dfbb..13513b8 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/mempool.h>
#include <linux/pagemap.h>
+#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index a20bea5..cfd1ce3 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -492,17 +492,13 @@ compare_oid(unsigned long *oid1, unsigned int oid1len,
int
decode_negTokenInit(unsigned char *security_blob, int length,
- enum securityEnum *secType)
+ struct TCP_Server_Info *server)
{
struct asn1_ctx ctx;
unsigned char *end;
unsigned char *sequence_end;
unsigned long *oid = NULL;
unsigned int cls, con, tag, oidlen, rc;
- bool use_ntlmssp = false;
- bool use_kerberos = false;
- bool use_kerberosu2u = false;
- bool use_mskerberos = false;
/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
@@ -510,11 +506,11 @@ decode_negTokenInit(unsigned char *security_blob, int length,
/* GSSAPI header */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit header"));
+ cFYI(1, "Error decoding negTokenInit header");
return 0;
} else if ((cls != ASN1_APL) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cFYI(1, ("cls = %d con = %d tag = %d", cls, con, tag));
+ cFYI(1, "cls = %d con = %d tag = %d", cls, con, tag);
return 0;
}
@@ -535,56 +531,52 @@ decode_negTokenInit(unsigned char *security_blob, int length,
/* SPNEGO OID not present or garbled -- bail out */
if (!rc) {
- cFYI(1, ("Error decoding negTokenInit header"));
+ cFYI(1, "Error decoding negTokenInit header");
return 0;
}
/* SPNEGO */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit"));
+ cFYI(1, "Error decoding negTokenInit");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 0",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 0",
+ cls, con, tag, end, *end);
return 0;
}
/* negTokenInit */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit"));
+ cFYI(1, "Error decoding negTokenInit");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 1",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 1",
+ cls, con, tag, end, *end);
return 0;
}
/* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit"));
+ cFYI(1, "Error decoding 2nd part of negTokenInit");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 0",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 0",
+ cls, con, tag, end, *end);
return 0;
}
/* sequence of */
if (asn1_header_decode
(&ctx, &sequence_end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit"));
+ cFYI(1, "Error decoding 2nd part of negTokenInit");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 1",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 1",
+ cls, con, tag, end, *end);
return 0;
}
@@ -592,37 +584,33 @@ decode_negTokenInit(unsigned char *security_blob, int length,
while (!asn1_eoc_decode(&ctx, sequence_end)) {
rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
if (!rc) {
- cFYI(1,
- ("Error decoding negTokenInit hdr exit2"));
+ cFYI(1, "Error decoding negTokenInit hdr exit2");
return 0;
}
if ((tag == ASN1_OJI) && (con == ASN1_PRI)) {
if (asn1_oid_decode(&ctx, end, &oid, &oidlen)) {
- cFYI(1, ("OID len = %d oid = 0x%lx 0x%lx "
- "0x%lx 0x%lx", oidlen, *oid,
- *(oid + 1), *(oid + 2), *(oid + 3)));
+ cFYI(1, "OID len = %d oid = 0x%lx 0x%lx "
+ "0x%lx 0x%lx", oidlen, *oid,
+ *(oid + 1), *(oid + 2), *(oid + 3));
if (compare_oid(oid, oidlen, MSKRB5_OID,
- MSKRB5_OID_LEN) &&
- !use_mskerberos)
- use_mskerberos = true;
+ MSKRB5_OID_LEN))
+ server->sec_mskerberos = true;
else if (compare_oid(oid, oidlen, KRB5U2U_OID,
- KRB5U2U_OID_LEN) &&
- !use_kerberosu2u)
- use_kerberosu2u = true;
+ KRB5U2U_OID_LEN))
+ server->sec_kerberosu2u = true;
else if (compare_oid(oid, oidlen, KRB5_OID,
- KRB5_OID_LEN) &&
- !use_kerberos)
- use_kerberos = true;
+ KRB5_OID_LEN))
+ server->sec_kerberos = true;
else if (compare_oid(oid, oidlen, NTLMSSP_OID,
NTLMSSP_OID_LEN))
- use_ntlmssp = true;
+ server->sec_ntlmssp = true;
kfree(oid);
}
} else {
- cFYI(1, ("Should be an oid what is going on?"));
+ cFYI(1, "Should be an oid what is going on?");
}
}
@@ -632,54 +620,47 @@ decode_negTokenInit(unsigned char *security_blob, int length,
no mechListMic (e.g. NTLMSSP instead of KRB5) */
if (ctx.error == ASN1_ERR_DEC_EMPTY)
goto decode_negtoken_exit;
- cFYI(1, ("Error decoding last part negTokenInit exit3"));
+ cFYI(1, "Error decoding last part negTokenInit exit3");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
/* tag = 3 indicating mechListMIC */
- cFYI(1, ("Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
return 0;
}
/* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding last part negTokenInit exit5"));
+ cFYI(1, "Error decoding last part negTokenInit exit5");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cFYI(1, ("cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
}
/* sequence of */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding last part negTokenInit exit 7"));
+ cFYI(1, "Error decoding last part negTokenInit exit 7");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
- cFYI(1, ("Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
return 0;
}
/* general string */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding last part negTokenInit exit9"));
+ cFYI(1, "Error decoding last part negTokenInit exit9");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
|| (tag != ASN1_GENSTR)) {
- cFYI(1, ("Exit10 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
return 0;
}
- cFYI(1, ("Need to call asn1_octets_decode() function for %s",
- ctx.pointer)); /* is this UTF-8 or ASCII? */
+ cFYI(1, "Need to call asn1_octets_decode() function for %s",
+ ctx.pointer); /* is this UTF-8 or ASCII? */
decode_negtoken_exit:
- if (use_kerberos)
- *secType = Kerberos;
- else if (use_mskerberos)
- *secType = MSKerberos;
- else if (use_ntlmssp)
- *secType = RawNTLMSSP;
-
return 1;
}
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 42cec2a..4fce6e6 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -60,10 +60,10 @@ cifs_dump_mem(char *label, void *data, int length)
#ifdef CONFIG_CIFS_DEBUG2
void cifs_dump_detail(struct smb_hdr *smb)
{
- cERROR(1, ("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
+ cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
- smb->Flags, smb->Flags2, smb->Mid, smb->Pid));
- cERROR(1, ("smb buf %p len %d", smb, smbCalcSize_LE(smb)));
+ smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
+ cERROR(1, "smb buf %p len %d", smb, smbCalcSize_LE(smb));
}
@@ -75,25 +75,25 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
if (server == NULL)
return;
- cERROR(1, ("Dump pending requests:"));
+ cERROR(1, "Dump pending requests:");
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cERROR(1, ("State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+ cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
mid_entry->midState,
(int)mid_entry->command,
mid_entry->pid,
mid_entry->tsk,
- mid_entry->mid));
+ mid_entry->mid);
#ifdef CONFIG_CIFS_STATS2
- cERROR(1, ("IsLarge: %d buf: %p time rcv: %ld now: %ld",
+ cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
mid_entry->largeBuf,
mid_entry->resp_buf,
mid_entry->when_received,
- jiffies));
+ jiffies);
#endif /* STATS2 */
- cERROR(1, ("IsMult: %d IsEnd: %d", mid_entry->multiRsp,
- mid_entry->multiEnd));
+ cERROR(1, "IsMult: %d IsEnd: %d", mid_entry->multiRsp,
+ mid_entry->multiEnd);
if (mid_entry->resp_buf) {
cifs_dump_detail(mid_entry->resp_buf);
cifs_dump_mem("existing buf: ",
@@ -716,7 +716,7 @@ static const struct file_operations cifs_multiuser_mount_proc_fops = {
static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
{
- seq_printf(m, "0x%x\n", extended_security);
+ seq_printf(m, "0x%x\n", global_secflags);
return 0;
}
@@ -744,13 +744,13 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
/* single char or single char followed by null */
c = flags_string[0];
if (c == '0' || c == 'n' || c == 'N') {
- extended_security = CIFSSEC_DEF; /* default */
+ global_secflags = CIFSSEC_DEF; /* default */
return count;
} else if (c == '1' || c == 'y' || c == 'Y') {
- extended_security = CIFSSEC_MAX;
+ global_secflags = CIFSSEC_MAX;
return count;
} else if (!isdigit(c)) {
- cERROR(1, ("invalid flag %c", c));
+ cERROR(1, "invalid flag %c", c);
return -EINVAL;
}
}
@@ -758,26 +758,26 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
flags = simple_strtoul(flags_string, NULL, 0);
- cFYI(1, ("sec flags 0x%x", flags));
+ cFYI(1, "sec flags 0x%x", flags);
if (flags <= 0) {
- cERROR(1, ("invalid security flags %s", flags_string));
+ cERROR(1, "invalid security flags %s", flags_string);
return -EINVAL;
}
if (flags & ~CIFSSEC_MASK) {
- cERROR(1, ("attempt to set unsupported security flags 0x%x",
- flags & ~CIFSSEC_MASK));
+ cERROR(1, "attempt to set unsupported security flags 0x%x",
+ flags & ~CIFSSEC_MASK);
return -EINVAL;
}
/* flags look ok - update the global security flags for cifs module */
- extended_security = flags;
- if (extended_security & CIFSSEC_MUST_SIGN) {
+ global_secflags = flags;
+ if (global_secflags & CIFSSEC_MUST_SIGN) {
/* requiring signing implies signing is allowed */
- extended_security |= CIFSSEC_MAY_SIGN;
- cFYI(1, ("packet signing now required"));
- } else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) {
- cFYI(1, ("packet signing disabled"));
+ global_secflags |= CIFSSEC_MAY_SIGN;
+ cFYI(1, "packet signing now required");
+ } else if ((global_secflags & CIFSSEC_MAY_SIGN) == 0) {
+ cFYI(1, "packet signing disabled");
}
/* BB should we turn on MAY flags for other MUST options? */
return count;
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 5eb3b83..aa31689 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -43,34 +43,54 @@ void dump_smb(struct smb_hdr *, int);
*/
#ifdef CIFS_DEBUG
-
/* information message: e.g., configuration, major event */
extern int cifsFYI;
-#define cifsfyi(format,arg...) if (cifsFYI & CIFS_INFO) printk(KERN_DEBUG " " __FILE__ ": " format "\n" "" , ## arg)
+#define cifsfyi(fmt, arg...) \
+do { \
+ if (cifsFYI & CIFS_INFO) \
+ printk(KERN_DEBUG "%s: " fmt "\n", __FILE__, ##arg); \
+} while (0)
-#define cFYI(button,prspec) if (button) cifsfyi prspec
+#define cFYI(set, fmt, arg...) \
+do { \
+ if (set) \
+ cifsfyi(fmt, ##arg); \
+} while (0)
-#define cifswarn(format, arg...) printk(KERN_WARNING ": " format "\n" , ## arg)
+#define cifswarn(fmt, arg...) \
+ printk(KERN_WARNING fmt "\n", ##arg)
/* debug event message: */
extern int cifsERROR;
-#define cEVENT(format,arg...) if (cifsERROR) printk(KERN_EVENT __FILE__ ": " format "\n" , ## arg)
+#define cEVENT(fmt, arg...) \
+do { \
+ if (cifsERROR) \
+ printk(KERN_EVENT "%s: " fmt "\n", __FILE__, ##arg); \
+} while (0)
/* error event message: e.g., i/o error */
-#define cifserror(format,arg...) if (cifsERROR) printk(KERN_ERR " CIFS VFS: " format "\n" "" , ## arg)
+#define cifserror(fmt, arg...) \
+do { \
+ if (cifsERROR) \
+ printk(KERN_ERR "CIFS VFS: " fmt "\n", ##arg); \
+} while (0)
-#define cERROR(button, prspec) if (button) cifserror prspec
+#define cERROR(set, fmt, arg...) \
+do { \
+ if (set) \
+ cifserror(fmt, ##arg); \
+} while (0)
/*
* debug OFF
* ---------
*/
#else /* _CIFS_DEBUG */
-#define cERROR(button, prspec)
-#define cEVENT(format, arg...)
-#define cFYI(button, prspec)
-#define cifserror(format, arg...)
+#define cERROR(set, fmt, arg...)
+#define cEVENT(fmt, arg...)
+#define cFYI(set, fmt, arg...)
+#define cifserror(fmt, arg...)
#endif /* _CIFS_DEBUG */
#endif /* _H_CIFS_DEBUG */
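The rewrite above replaces the old double-parenthesis trick, cFYI(1, ("fmt", args)), with GNU variadic macros, each wrapped in do { } while (0) so an invocation expands to exactly one statement even in an unbraced if/else, with ##arg swallowing the trailing comma when no arguments follow the format. A small user-space sketch of the same construction, assuming nothing from the kernel headers:

#include <stdio.h>

static int my_debug_on = 1;     /* stands in for cifsFYI */

#define my_dbg(fmt, arg...)                                     \
do {                                                            \
        if (my_debug_on)                                        \
                printf("%s: " fmt "\n", __FILE__, ##arg);       \
} while (0)

int main(void)
{
        my_dbg("mounted");              /* ##arg removes the comma */
        my_dbg("rc = %d", -22);         /* ordinary variadic case */
        return 0;
}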
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 78e4d2a..ac19a6f 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -85,8 +85,8 @@ static char *cifs_get_share_name(const char *node_name)
/* find server name end */
pSep = memchr(UNC+2, '\\', len-2);
if (!pSep) {
- cERROR(1, ("%s: no server name end in node name: %s",
- __func__, node_name));
+ cERROR(1, "%s: no server name end in node name: %s",
+ __func__, node_name);
kfree(UNC);
return ERR_PTR(-EINVAL);
}
@@ -142,8 +142,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
if (rc != 0) {
- cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
- __func__, *devname, rc));
+ cERROR(1, "%s: Failed to resolve server part of %s to IP: %d",
+ __func__, *devname, rc);
goto compose_mount_options_err;
}
/* md_len = strlen(...) + 12 for 'sep+prefixpath='
@@ -217,8 +217,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
strcat(mountdata, fullpath + ref->path_consumed);
}
- /*cFYI(1,("%s: parent mountdata: %s", __func__,sb_mountdata));*/
- /*cFYI(1, ("%s: submount mountdata: %s", __func__, mountdata ));*/
+ /*cFYI(1, "%s: parent mountdata: %s", __func__,sb_mountdata);*/
+ /*cFYI(1, "%s: submount mountdata: %s", __func__, mountdata );*/
compose_mount_options_out:
kfree(srvIP);
@@ -294,11 +294,11 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
static void dump_referral(const struct dfs_info3_param *ref)
{
- cFYI(1, ("DFS: ref path: %s", ref->path_name));
- cFYI(1, ("DFS: node path: %s", ref->node_name));
- cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type));
- cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
- ref->path_consumed));
+ cFYI(1, "DFS: ref path: %s", ref->path_name);
+ cFYI(1, "DFS: node path: %s", ref->node_name);
+ cFYI(1, "DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type);
+ cFYI(1, "DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
+ ref->path_consumed);
}
@@ -314,7 +314,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
int rc = 0;
struct vfsmount *mnt = ERR_PTR(-ENOENT);
- cFYI(1, ("in %s", __func__));
+ cFYI(1, "in %s", __func__);
BUG_ON(IS_ROOT(dentry));
xid = GetXid();
@@ -352,15 +352,15 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
/* connect to a node */
len = strlen(referrals[i].node_name);
if (len < 2) {
- cERROR(1, ("%s: Net Address path too short: %s",
- __func__, referrals[i].node_name));
+ cERROR(1, "%s: Net Address path too short: %s",
+ __func__, referrals[i].node_name);
rc = -EINVAL;
goto out_err;
}
mnt = cifs_dfs_do_refmount(nd->path.mnt,
nd->path.dentry, referrals + i);
- cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
- referrals[i].node_name, mnt));
+ cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
+ referrals[i].node_name, mnt);
/* complete mount procedure if we acquired submount */
if (!IS_ERR(mnt))
@@ -378,7 +378,7 @@ out:
FreeXid(xid);
free_dfs_info_array(referrals, num_referrals);
kfree(full_path);
- cFYI(1, ("leaving %s" , __func__));
+ cFYI(1, "leaving %s" , __func__);
return ERR_PTR(rc);
out_err:
path_put(&nd->path);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 310d12f..379bd7d 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -133,9 +133,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
dp = description + strlen(description);
/* for now, only sec=krb5 and sec=mskrb5 are valid */
- if (server->secType == Kerberos)
+ if (server->sec_kerberos)
sprintf(dp, ";sec=krb5");
- else if (server->secType == MSKerberos)
+ else if (server->sec_mskerberos)
sprintf(dp, ";sec=mskrb5");
else
goto out;
@@ -149,7 +149,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
dp = description + strlen(description);
sprintf(dp, ";pid=0x%x", current->pid);
- cFYI(1, ("key description = %s", description));
+ cFYI(1, "key description = %s", description);
spnego_key = request_key(&cifs_spnego_key_type, description, "");
#ifdef CONFIG_CIFS_DEBUG2
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index d07676b..430f510 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -200,9 +200,8 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
/* works for 2.4.0 kernel or later */
charlen = codepage->char2uni(from, len, &wchar_to[i]);
if (charlen < 1) {
- cERROR(1,
- ("strtoUCS: char2uni of %d returned %d",
- (int)*from, charlen));
+ cERROR(1, "strtoUCS: char2uni of %d returned %d",
+ (int)*from, charlen);
/* A question mark */
to[i] = cpu_to_le16(0x003f);
charlen = 1;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 9b716d0..85d7cf7 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -87,11 +87,11 @@ int match_sid(struct cifs_sid *ctsid)
continue; /* all sub_auth values do not match */
}
- cFYI(1, ("matching sid: %s\n", wksidarr[i].sidname));
+ cFYI(1, "matching sid: %s\n", wksidarr[i].sidname);
return 0; /* sids compare/match */
}
- cFYI(1, ("No matching sid"));
+ cFYI(1, "No matching sid");
return -1;
}
@@ -208,14 +208,14 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
*pbits_to_set &= ~S_IXUGO;
return;
} else if (type != ACCESS_ALLOWED) {
- cERROR(1, ("unknown access control type %d", type));
+ cERROR(1, "unknown access control type %d", type);
return;
}
/* else ACCESS_ALLOWED type */
if (flags & GENERIC_ALL) {
*pmode |= (S_IRWXUGO & (*pbits_to_set));
- cFYI(DBG2, ("all perms"));
+ cFYI(DBG2, "all perms");
return;
}
if ((flags & GENERIC_WRITE) ||
@@ -228,7 +228,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pmode |= (S_IXUGO & (*pbits_to_set));
- cFYI(DBG2, ("access flags 0x%x mode now 0x%x", flags, *pmode));
+ cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
return;
}
@@ -257,7 +257,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
if (mode & S_IXUGO)
*pace_flags |= SET_FILE_EXEC_RIGHTS;
- cFYI(DBG2, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags));
+ cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
return;
}
@@ -297,24 +297,24 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
/* validate that we do not go past end of acl */
if (le16_to_cpu(pace->size) < 16) {
- cERROR(1, ("ACE too small, %d", le16_to_cpu(pace->size)));
+ cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
return;
}
if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
- cERROR(1, ("ACL too small to parse ACE"));
+ cERROR(1, "ACL too small to parse ACE");
return;
}
num_subauth = pace->sid.num_subauth;
if (num_subauth) {
int i;
- cFYI(1, ("ACE revision %d num_auth %d type %d flags %d size %d",
+ cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
pace->sid.revision, pace->sid.num_subauth, pace->type,
- pace->flags, le16_to_cpu(pace->size)));
+ pace->flags, le16_to_cpu(pace->size));
for (i = 0; i < num_subauth; ++i) {
- cFYI(1, ("ACE sub_auth[%d]: 0x%x", i,
- le32_to_cpu(pace->sid.sub_auth[i])));
+ cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
+ le32_to_cpu(pace->sid.sub_auth[i]));
}
/* BB add length check to make sure that we do not have huge
@@ -347,13 +347,13 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
/* validate that we do not go past end of acl */
if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
- cERROR(1, ("ACL too small to parse DACL"));
+ cERROR(1, "ACL too small to parse DACL");
return;
}
- cFYI(DBG2, ("DACL revision %d size %d num aces %d",
+ cFYI(DBG2, "DACL revision %d size %d num aces %d",
le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
- le32_to_cpu(pdacl->num_aces)));
+ le32_to_cpu(pdacl->num_aces));
/* reset rwx permissions for user/group/other.
Also, if num_aces is 0 i.e. DACL has no ACEs,
@@ -437,25 +437,25 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
/* validate that we do not go past end of ACL - sid must be at least 8
bytes long (assuming no sub-auths - e.g. the null SID) */
if (end_of_acl < (char *)psid + 8) {
- cERROR(1, ("ACL too small to parse SID %p", psid));
+ cERROR(1, "ACL too small to parse SID %p", psid);
return -EINVAL;
}
if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
int i;
- cFYI(1, ("SID revision %d num_auth %d",
- psid->revision, psid->num_subauth));
+ cFYI(1, "SID revision %d num_auth %d",
+ psid->revision, psid->num_subauth);
for (i = 0; i < psid->num_subauth; i++) {
- cFYI(1, ("SID sub_auth[%d]: 0x%x ", i,
- le32_to_cpu(psid->sub_auth[i])));
+ cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
+ le32_to_cpu(psid->sub_auth[i]));
}
/* BB add length check to make sure that we do not have huge
num auths and therefore go off the end */
- cFYI(1, ("RID 0x%x",
- le32_to_cpu(psid->sub_auth[psid->num_subauth-1])));
+ cFYI(1, "RID 0x%x",
+ le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
}
@@ -482,11 +482,11 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
le32_to_cpu(pntsd->gsidoffset));
dacloffset = le32_to_cpu(pntsd->dacloffset);
dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
- cFYI(DBG2, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x "
+ cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
"sacloffset 0x%x dacloffset 0x%x",
pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
le32_to_cpu(pntsd->gsidoffset),
- le32_to_cpu(pntsd->sacloffset), dacloffset));
+ le32_to_cpu(pntsd->sacloffset), dacloffset);
/* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
rc = parse_sid(owner_sid_ptr, end_of_acl);
if (rc)
@@ -500,7 +500,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
group_sid_ptr, fattr);
else
- cFYI(1, ("no ACL")); /* BB grant all or default perms? */
+ cFYI(1, "no ACL"); /* BB grant all or default perms? */
/* cifscred->uid = owner_sid_ptr->rid;
cifscred->gid = group_sid_ptr->rid;
@@ -563,7 +563,7 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
FreeXid(xid);
- cFYI(1, ("GetCIFSACL rc = %d ACL len %d", rc, *pacllen));
+ cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
return pntsd;
}
@@ -581,12 +581,12 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
&fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cERROR(1, ("Unable to open file to get ACL"));
+ cERROR(1, "Unable to open file to get ACL");
goto out;
}
rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen);
- cFYI(1, ("GetCIFSACL rc = %d ACL len %d", rc, *pacllen));
+ cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
CIFSSMBClose(xid, cifs_sb->tcon, fid);
out:
@@ -621,7 +621,7 @@ static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
FreeXid(xid);
- cFYI(DBG2, ("SetCIFSACL rc = %d", rc));
+ cFYI(DBG2, "SetCIFSACL rc = %d", rc);
return rc;
}
@@ -638,12 +638,12 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
&fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cERROR(1, ("Unable to open file to set ACL"));
+ cERROR(1, "Unable to open file to set ACL");
goto out;
}
rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
- cFYI(DBG2, ("SetCIFSACL rc = %d", rc));
+ cFYI(DBG2, "SetCIFSACL rc = %d", rc);
CIFSSMBClose(xid, cifs_sb->tcon, fid);
out:
@@ -659,7 +659,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
struct cifsFileInfo *open_file;
int rc;
- cFYI(DBG2, ("set ACL for %s from mode 0x%x", path, inode->i_mode));
+ cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
open_file = find_readable_file(CIFS_I(inode));
if (!open_file)
@@ -679,7 +679,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
u32 acllen = 0;
int rc = 0;
- cFYI(DBG2, ("converting ACL to mode for %s", path));
+ cFYI(DBG2, "converting ACL to mode for %s", path);
if (pfid)
pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
@@ -690,7 +690,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
if (pntsd)
rc = parse_sec_desc(pntsd, acllen, fattr);
if (rc)
- cFYI(1, ("parse sec desc failed rc = %d", rc));
+ cFYI(1, "parse sec desc failed rc = %d", rc);
kfree(pntsd);
return;
@@ -704,7 +704,7 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
- cFYI(DBG2, ("set ACL from mode for %s", path));
+ cFYI(DBG2, "set ACL from mode for %s", path);
/* Get the security descriptor */
pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
@@ -721,19 +721,19 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
DEFSECDESCLEN : secdesclen;
pnntsd = kmalloc(secdesclen, GFP_KERNEL);
if (!pnntsd) {
- cERROR(1, ("Unable to allocate security descriptor"));
+ cERROR(1, "Unable to allocate security descriptor");
kfree(pntsd);
return -ENOMEM;
}
rc = build_sec_desc(pntsd, pnntsd, inode, nmode);
- cFYI(DBG2, ("build_sec_desc rc: %d", rc));
+ cFYI(DBG2, "build_sec_desc rc: %d", rc);
if (!rc) {
/* Set the security descriptor */
rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
- cFYI(DBG2, ("set_cifs_acl rc: %d", rc));
+ cFYI(DBG2, "set_cifs_acl rc: %d", rc);
}
kfree(pnntsd);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index fbe9864..847628d 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -103,7 +103,7 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
- cERROR(1, ("null iovec entry"));
+ cERROR(1, "null iovec entry");
return -EIO;
}
/* The first entry includes a length field (which does not get
@@ -181,8 +181,8 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
/* Do not need to verify session setups with signature "BSRSPYL " */
if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
- cFYI(1, ("dummy signature received for smb command 0x%x",
- cifs_pdu->Command));
+ cFYI(1, "dummy signature received for smb command 0x%x",
+ cifs_pdu->Command);
/* save off the original signature so we can modify the smb and check
its signature against what the server sent */
@@ -291,7 +291,7 @@ void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
if (password)
strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
- if (!encrypt && extended_security & CIFSSEC_MAY_PLNTXT) {
+ if (!encrypt && global_secflags & CIFSSEC_MAY_PLNTXT) {
memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE);
memcpy(lnm_session_key, password_with_pad,
CIFS_ENCPWD_SIZE);
@@ -398,7 +398,7 @@ void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
/* calculate buf->ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, nls_cp);
if (rc)
- cERROR(1, ("could not get v2 hash rc %d", rc));
+ cERROR(1, "could not get v2 hash rc %d", rc);
CalcNTLMv2_response(ses, resp_buf);
/* now calculate the MAC key for NTLMv2 */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index ad235d6..78c02eb 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -49,10 +49,6 @@
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
-#ifdef CONFIG_CIFS_QUOTA
-static const struct quotactl_ops cifs_quotactl_ops;
-#endif /* QUOTA */
-
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
@@ -61,7 +57,7 @@ unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
-unsigned int extended_security = CIFSSEC_DEF;
+unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
static const struct super_operations cifs_super_ops;
@@ -86,8 +82,6 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
-extern struct kmem_cache *cifs_oplock_cachep;
-
static int
cifs_read_super(struct super_block *sb, void *data,
const char *devname, int silent)
@@ -135,8 +129,7 @@ cifs_read_super(struct super_block *sb, void *data,
if (rc) {
if (!silent)
- cERROR(1,
- ("cifs_mount failed w/return code = %d", rc));
+ cERROR(1, "cifs_mount failed w/return code = %d", rc);
goto out_mount_failed;
}
@@ -146,9 +139,6 @@ cifs_read_super(struct super_block *sb, void *data,
/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
sb->s_blocksize =
cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
-#ifdef CONFIG_CIFS_QUOTA
- sb->s_qcop = &cifs_quotactl_ops;
-#endif
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
inode = cifs_root_iget(sb, ROOT_I);
@@ -168,7 +158,7 @@ cifs_read_super(struct super_block *sb, void *data,
#ifdef CONFIG_CIFS_EXPERIMENTAL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
- cFYI(1, ("export ops supported"));
+ cFYI(1, "export ops supported");
sb->s_export_op = &cifs_export_ops;
}
#endif /* EXPERIMENTAL */
@@ -176,7 +166,7 @@ cifs_read_super(struct super_block *sb, void *data,
return 0;
out_no_root:
- cERROR(1, ("cifs_read_super: get root inode failed"));
+ cERROR(1, "cifs_read_super: get root inode failed");
if (inode)
iput(inode);
@@ -203,10 +193,10 @@ cifs_put_super(struct super_block *sb)
int rc = 0;
struct cifs_sb_info *cifs_sb;
- cFYI(1, ("In cifs_put_super"));
+ cFYI(1, "In cifs_put_super");
cifs_sb = CIFS_SB(sb);
if (cifs_sb == NULL) {
- cFYI(1, ("Empty cifs superblock info passed to unmount"));
+ cFYI(1, "Empty cifs superblock info passed to unmount");
return;
}
@@ -214,7 +204,7 @@ cifs_put_super(struct super_block *sb)
rc = cifs_umount(sb, cifs_sb);
if (rc)
- cERROR(1, ("cifs_umount failed with return code %d", rc));
+ cERROR(1, "cifs_umount failed with return code %d", rc);
#ifdef CONFIG_CIFS_DFS_UPCALL
if (cifs_sb->mountdata) {
kfree(cifs_sb->mountdata);
@@ -300,7 +290,6 @@ static int cifs_permission(struct inode *inode, int mask)
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
-struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
@@ -432,106 +421,6 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
return 0;
}
-#ifdef CONFIG_CIFS_QUOTA
-int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
- struct fs_disk_quota *pdquota)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
- struct fs_disk_quota *pdquota)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("pqstats %p", qstats));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-static const struct quotactl_ops cifs_quotactl_ops = {
- .set_xquota = cifs_xquota_set,
- .get_xquota = cifs_xquota_get,
- .set_xstate = cifs_xstate_set,
- .get_xstate = cifs_xstate_get,
-};
-#endif
-
static void cifs_umount_begin(struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -558,7 +447,7 @@ static void cifs_umount_begin(struct super_block *sb)
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
/* cancel_notify_requests(tcon); */
if (tcon->ses && tcon->ses->server) {
- cFYI(1, ("wake up tasks now - umount begin not complete"));
+ cFYI(1, "wake up tasks now - umount begin not complete");
wake_up_all(&tcon->ses->server->request_q);
wake_up_all(&tcon->ses->server->response_q);
msleep(1); /* yield */
@@ -609,7 +498,7 @@ cifs_get_sb(struct file_system_type *fs_type,
int rc;
struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
- cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
+ cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
if (IS_ERR(sb))
return PTR_ERR(sb);
@@ -656,7 +545,6 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
return generic_file_llseek_unlocked(file, offset, origin);
}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
{
/* note that this is called by vfs setlease with the BKL held
@@ -685,7 +573,6 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
else
return -EAGAIN;
}
-#endif
struct file_system_type cifs_fs_type = {
.owner = THIS_MODULE,
@@ -762,10 +649,7 @@ const struct file_operations cifs_file_ops = {
#ifdef CONFIG_CIFS_POSIX
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
-
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_ops = {
@@ -784,9 +668,7 @@ const struct file_operations cifs_file_direct_ops = {
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
.llseek = cifs_llseek,
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
.read = do_sync_read,
@@ -803,10 +685,7 @@ const struct file_operations cifs_file_nobrl_ops = {
#ifdef CONFIG_CIFS_POSIX
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
-
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_nobrl_ops = {
@@ -824,9 +703,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
.llseek = cifs_llseek,
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_dir_ops = {
@@ -878,7 +755,7 @@ cifs_init_request_bufs(void)
} else {
CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
}
-/* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
+/* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
cifs_req_cachep = kmem_cache_create("cifs_request",
CIFSMaxBufSize +
MAX_CIFS_HDR_SIZE, 0,
@@ -890,7 +767,7 @@ cifs_init_request_bufs(void)
cifs_min_rcv = 1;
else if (cifs_min_rcv > 64) {
cifs_min_rcv = 64;
- cERROR(1, ("cifs_min_rcv set to maximum (64)"));
+ cERROR(1, "cifs_min_rcv set to maximum (64)");
}
cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
@@ -921,7 +798,7 @@ cifs_init_request_bufs(void)
cifs_min_small = 2;
else if (cifs_min_small > 256) {
cifs_min_small = 256;
- cFYI(1, ("cifs_min_small set to maximum (256)"));
+ cFYI(1, "cifs_min_small set to maximum (256)");
}
cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
@@ -962,15 +839,6 @@ cifs_init_mids(void)
return -ENOMEM;
}
- cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
- sizeof(struct oplock_q_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (cifs_oplock_cachep == NULL) {
- mempool_destroy(cifs_mid_poolp);
- kmem_cache_destroy(cifs_mid_cachep);
- return -ENOMEM;
- }
-
return 0;
}
@@ -979,7 +847,6 @@ cifs_destroy_mids(void)
{
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
- kmem_cache_destroy(cifs_oplock_cachep);
}
static int __init
@@ -1019,10 +886,10 @@ init_cifs(void)
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
- cFYI(1, ("cifs_max_pending set to min of 2"));
+ cFYI(1, "cifs_max_pending set to min of 2");
} else if (cifs_max_pending > 256) {
cifs_max_pending = 256;
- cFYI(1, ("cifs_max_pending set to max of 256"));
+ cFYI(1, "cifs_max_pending set to max of 256");
}
rc = cifs_init_inodecache();
@@ -1080,7 +947,7 @@ init_cifs(void)
static void __exit
exit_cifs(void)
{
- cFYI(DBG2, ("exit_cifs"));
+ cFYI(DBG2, "exit_cifs");
cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
cifs_dfs_release_automount_timer();
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 7aa57ec..0242ff9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -114,5 +114,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.62"
+#define CIFS_VERSION "1.64"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ecf0ffb..a88479c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -87,7 +87,6 @@ enum securityEnum {
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
/* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */
Kerberos, /* Kerberos via SPNEGO */
- MSKerberos, /* MS Kerberos via SPNEGO */
};
enum protocolEnum {
@@ -185,6 +184,12 @@ struct TCP_Server_Info {
struct mac_key mac_signing_key;
char ntlmv2_hash[16];
unsigned long lstrp; /* when we got last response from this server */
+ u16 dialect; /* dialect index that server chose */
+ /* extended security flavors that server supports */
+ bool sec_kerberos; /* supports plain Kerberos */
+ bool sec_mskerberos; /* supports legacy MS Kerberos */
+ bool sec_kerberosu2u; /* supports U2U Kerberos */
+ bool sec_ntlmssp; /* supports NTLMSSP */
};
/*
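
With MSKerberos gone from securityEnum, the distinction between the SPNEGO mechanisms moves into these per-server booleans, which decode_negTokenInit (see the fs/cifs/asn1.c changes in the diffstat) can set as it walks the mechType OID list. A hedged sketch of that recording step follows — compare_oid() and the *_OID constants are assumed from asn1.c, and the helper name is illustrative:

/* Illustrative only: one branch per mechanism OID found in the blob. */
static void record_mech(struct TCP_Server_Info *server,
			unsigned long *oid, unsigned int oidlen)
{
	if (compare_oid(oid, oidlen, MSKRB5_OID, MSKRB5_OID_LEN))
		server->sec_mskerberos = true;
	else if (compare_oid(oid, oidlen, KRB5U2U_OID, KRB5U2U_OID_LEN))
		server->sec_kerberosu2u = true;
	else if (compare_oid(oid, oidlen, KRB5_OID, KRB5_OID_LEN))
		server->sec_kerberos = true;
	else if (compare_oid(oid, oidlen, NTLMSSP_OID, NTLMSSP_OID_LEN))
		server->sec_ntlmssp = true;
}

The negotiate-response hunk in cifssmb.c further down then collapses these flags back into a single secType, preferring Kerberos over raw NTLMSSP.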
@@ -502,6 +507,7 @@ struct dfs_info3_param {
#define CIFS_FATTR_DFS_REFERRAL 0x1
#define CIFS_FATTR_DELETE_PENDING 0x2
#define CIFS_FATTR_NEED_REVAL 0x4
+#define CIFS_FATTR_INO_COLLISION 0x8
struct cifs_fattr {
u32 cf_flags;
@@ -717,7 +723,7 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
GLOBAL_EXTERN unsigned int oplockEnabled;
GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
-GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
+GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 39e47f4..fb1657e 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -39,8 +39,20 @@ extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
unsigned int /* length */);
extern unsigned int _GetXid(void);
extern void _FreeXid(unsigned int);
-#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current_fsuid()));
-#define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__func__,curr_xid,(int)rc));}
+#define GetXid() \
+({ \
+ int __xid = (int)_GetXid(); \
+ cFYI(1, "CIFS VFS: in %s as Xid: %d with uid: %d", \
+ __func__, __xid, current_fsuid()); \
+ __xid; \
+})
+
+#define FreeXid(curr_xid) \
+do { \
+ _FreeXid(curr_xid); \
+ cFYI(1, "CIFS VFS: leaving %s (xid = %d) rc = %d", \
+ __func__, curr_xid, (int)rc); \
+} while (0)
extern char *build_path_from_dentry(struct dentry *);
extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
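
The old single-line GetXid()/FreeXid() macros above expanded to two statements, so the cFYI() half escaped any unbraced if that guarded the call; the replacements use a GNU statement expression ({ ... }) so GetXid() still yields a value, and do { } while (0) so FreeXid() binds into one statement. A standalone sketch of the hazard, using hypothetical logging macros rather than the CIFS ones:

#include <stdio.h>

/* Unsafe: expands to two statements, so only the first is guarded. */
#define LOG_BAD(x)	printf("enter\n"); printf("x=%d\n", (x))

/* Safe: do { } while (0) binds the body into a single statement. */
#define LOG_OK(x)				\
do {						\
	printf("enter\n");			\
	printf("x=%d\n", (x));			\
} while (0)

int main(void)
{
	int debug = 0, x = 7;

	if (debug)
		LOG_BAD(x);	/* "x=7" still prints: the 2nd printf is unguarded */
	if (debug)
		LOG_OK(x);	/* prints nothing, as intended */
	return 0;
}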
@@ -73,7 +85,7 @@ extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *);
extern unsigned int smbCalcSize(struct smb_hdr *ptr);
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
- enum securityEnum *secType);
+ struct TCP_Server_Info *server);
extern int cifs_convert_address(char *src, void *dst);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
@@ -83,7 +95,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
struct cifsSesInfo *ses,
void **request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
- const int stage,
const struct nls_table *nls_cp);
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
@@ -95,8 +106,11 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
__u16 fileHandle, struct file *file,
struct vfsmount *mnt, unsigned int oflags);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid);
+ struct vfsmount *mnt,
+ struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid);
+void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr);
extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
FILE_UNIX_BASIC_INFO *info,
struct cifs_sb_info *cifs_sb);
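
cifs_fill_uniqueid(), declared above, pairs with the new CIFS_FATTR_INO_COLLISION flag in cifsglob.h: when the server is not supplying inode numbers, the client has to synthesize one itself. A plausible shape for the helper, assuming an iunique()-based fallback (a sketch, not necessarily the exact body):

void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr)
{
	/* server-supplied inode number is already in cf_uniqueid */
	if (CIFS_SB(sb)->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		return;

	/* otherwise generate one that is unique within this superblock */
	fattr->cf_uniqueid = iunique(sb, ROOT_I);
}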
@@ -125,7 +139,9 @@ extern void cifs_dfs_release_automount_timer(void);
void cifs_proc_init(void);
void cifs_proc_clean(void);
-extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
+extern int cifs_negotiate_protocol(unsigned int xid,
+ struct cifsSesInfo *ses);
+extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
struct nls_table *nls_info);
extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
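
Splitting cifs_negotiate_protocol() out of cifs_setup_session() reflects their different lifetimes: protocol negotiation happens once per TCP connection, while session setup can recur whenever an SMB session needs reconnecting. The call pattern implied by the new prototypes mirrors the cifs_reconnect_tcon() hunk in cifssmb.c below (error handling elided):

rc = cifs_negotiate_protocol(xid, ses);
if (rc == 0 && ses->need_reconnect)
	rc = cifs_setup_session(xid, ses, nls_codepage);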
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5d3f29f..c65c341 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifssmb.c
*
- * Copyright (C) International Business Machines Corp., 2002,2009
+ * Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Contains the routines for constructing the SMB PDUs themselves
@@ -130,8 +130,8 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) {
- cFYI(1, ("can not send cmd %d while umounting",
- smb_command));
+ cFYI(1, "can not send cmd %d while umounting",
+ smb_command);
return -ENODEV;
}
}
@@ -157,7 +157,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
* back on-line
*/
if (!tcon->retry || ses->status == CifsExiting) {
- cFYI(1, ("gave up waiting on reconnect in smb_init"));
+ cFYI(1, "gave up waiting on reconnect in smb_init");
return -EHOSTDOWN;
}
}
@@ -172,7 +172,8 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
* reconnect the same SMB session
*/
mutex_lock(&ses->session_mutex);
- if (ses->need_reconnect)
+ rc = cifs_negotiate_protocol(0, ses);
+ if (rc == 0 && ses->need_reconnect)
rc = cifs_setup_session(0, ses, nls_codepage);
/* do we need to reconnect tcon? */
@@ -184,7 +185,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
mark_open_files_invalid(tcon);
rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
mutex_unlock(&ses->session_mutex);
- cFYI(1, ("reconnect tcon rc = %d", rc));
+ cFYI(1, "reconnect tcon rc = %d", rc);
if (rc)
goto out;
@@ -355,7 +356,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
struct TCP_Server_Info *server;
u16 count;
unsigned int secFlags;
- u16 dialect;
if (ses->server)
server = ses->server;
@@ -372,9 +372,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */
else /* if override flags set only sign/seal OR them with global auth */
- secFlags = extended_security | ses->overrideSecFlg;
+ secFlags = global_secflags | ses->overrideSecFlg;
- cFYI(1, ("secFlags 0x%x", secFlags));
+ cFYI(1, "secFlags 0x%x", secFlags);
pSMB->hdr.Mid = GetNextMid(server);
pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
@@ -382,14 +382,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) {
- cFYI(1, ("Kerberos only mechanism, enable extended security"));
+ cFYI(1, "Kerberos only mechanism, enable extended security");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) {
- cFYI(1, ("NTLMSSP only mechanism, enable extended security"));
+ cFYI(1, "NTLMSSP only mechanism, enable extended security");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
}
#endif
@@ -408,10 +408,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (rc != 0)
goto neg_err_exit;
- dialect = le16_to_cpu(pSMBr->DialectIndex);
- cFYI(1, ("Dialect: %d", dialect));
+ server->dialect = le16_to_cpu(pSMBr->DialectIndex);
+ cFYI(1, "Dialect: %d", server->dialect);
/* Check wct = 1 error case */
- if ((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) {
+ if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) {
/* core returns wct = 1, but we do not ask for core - otherwise
a small wct only arrives when the dialect index is -1, indicating
we could not negotiate a common dialect */
@@ -419,8 +419,8 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
goto neg_err_exit;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
} else if ((pSMBr->hdr.WordCount == 13)
- && ((dialect == LANMAN_PROT)
- || (dialect == LANMAN2_PROT))) {
+ && ((server->dialect == LANMAN_PROT)
+ || (server->dialect == LANMAN2_PROT))) {
__s16 tmp;
struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;
@@ -428,8 +428,8 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
(secFlags & CIFSSEC_MAY_PLNTXT))
server->secType = LANMAN;
else {
- cERROR(1, ("mount failed weak security disabled"
- " in /proc/fs/cifs/SecurityFlags"));
+ cERROR(1, "mount failed weak security disabled"
+ " in /proc/fs/cifs/SecurityFlags");
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
@@ -462,9 +462,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
utc = CURRENT_TIME;
ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
rsp->SrvTime.Time, 0);
- cFYI(1, ("SrvTime %d sec since 1970 (utc: %d) diff: %d",
+ cFYI(1, "SrvTime %d sec since 1970 (utc: %d) diff: %d",
(int)ts.tv_sec, (int)utc.tv_sec,
- (int)(utc.tv_sec - ts.tv_sec)));
+ (int)(utc.tv_sec - ts.tv_sec));
val = (int)(utc.tv_sec - ts.tv_sec);
seconds = abs(val);
result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
@@ -478,7 +478,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->timeAdj = (int)tmp;
server->timeAdj *= 60; /* also in seconds */
}
- cFYI(1, ("server->timeAdj: %d seconds", server->timeAdj));
+ cFYI(1, "server->timeAdj: %d seconds", server->timeAdj);
/* BB get server time for time conversions and add
@@ -493,14 +493,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
goto neg_err_exit;
}
- cFYI(1, ("LANMAN negotiated"));
+ cFYI(1, "LANMAN negotiated");
/* we will not end up setting signing flags - as no signing
was in LANMAN and server did not return the flags on */
goto signing_check;
#else /* weak security disabled */
} else if (pSMBr->hdr.WordCount == 13) {
- cERROR(1, ("mount failed, cifs module not built "
- "with CIFS_WEAK_PW_HASH support"));
+ cERROR(1, "mount failed, cifs module not built "
+ "with CIFS_WEAK_PW_HASH support");
rc = -EOPNOTSUPP;
#endif /* WEAK_PW_HASH */
goto neg_err_exit;
@@ -512,14 +512,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
/* else wct == 17 NTLM */
server->secMode = pSMBr->SecurityMode;
if ((server->secMode & SECMODE_USER) == 0)
- cFYI(1, ("share mode security"));
+ cFYI(1, "share mode security");
if ((server->secMode & SECMODE_PW_ENCRYPT) == 0)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
#endif /* CIFS_WEAK_PW_HASH */
- cERROR(1, ("Server requests plain text password"
- " but client support disabled"));
+ cERROR(1, "Server requests plain text password"
+ " but client support disabled");
if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
server->secType = NTLMv2;
@@ -539,7 +539,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
#endif */
else {
rc = -EOPNOTSUPP;
- cERROR(1, ("Invalid security type"));
+ cERROR(1, "Invalid security type");
goto neg_err_exit;
}
/* else ... any others ...? */
@@ -551,7 +551,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
- cFYI(DBG2, ("Max buf = %d", ses->server->maxBuf));
+ cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
@@ -582,7 +582,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (memcmp(server->server_GUID,
pSMBr->u.extended_response.
GUID, 16) != 0) {
- cFYI(1, ("server UID changed"));
+ cFYI(1, "server UID changed");
memcpy(server->server_GUID,
pSMBr->u.extended_response.GUID,
16);
@@ -597,13 +597,19 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->secType = RawNTLMSSP;
} else {
rc = decode_negTokenInit(pSMBr->u.extended_response.
- SecurityBlob,
- count - 16,
- &server->secType);
+ SecurityBlob, count - 16,
+ server);
if (rc == 1)
rc = 0;
else
rc = -EINVAL;
+
+ if (server->sec_kerberos || server->sec_mskerberos)
+ server->secType = Kerberos;
+ else if (server->sec_ntlmssp)
+ server->secType = RawNTLMSSP;
+ else
+ rc = -EOPNOTSUPP;
}
} else
server->capabilities &= ~CAP_EXTENDED_SECURITY;
@@ -614,22 +620,21 @@ signing_check:
if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
/* MUST_SIGN already includes the MAY_SIGN FLAG
so if this is zero it means that signing is disabled */
- cFYI(1, ("Signing disabled"));
+ cFYI(1, "Signing disabled");
if (server->secMode & SECMODE_SIGN_REQUIRED) {
- cERROR(1, ("Server requires "
+ cERROR(1, "Server requires "
"packet signing to be enabled in "
- "/proc/fs/cifs/SecurityFlags."));
+ "/proc/fs/cifs/SecurityFlags.");
rc = -EOPNOTSUPP;
}
server->secMode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
} else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
/* signing required */
- cFYI(1, ("Must sign - secFlags 0x%x", secFlags));
+ cFYI(1, "Must sign - secFlags 0x%x", secFlags);
if ((server->secMode &
(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
- cERROR(1,
- ("signing required but server lacks support"));
+ cERROR(1, "signing required but server lacks support");
rc = -EOPNOTSUPP;
} else
server->secMode |= SECMODE_SIGN_REQUIRED;
@@ -643,7 +648,7 @@ signing_check:
neg_err_exit:
cifs_buf_release(pSMB);
- cFYI(1, ("negprot rc %d", rc));
+ cFYI(1, "negprot rc %d", rc);
return rc;
}
@@ -653,7 +658,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
struct smb_hdr *smb_buffer;
int rc = 0;
- cFYI(1, ("In tree disconnect"));
+ cFYI(1, "In tree disconnect");
/* BB: do we need to check this? These should never be NULL. */
if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
@@ -675,7 +680,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
if (rc)
- cFYI(1, ("Tree disconnect failed %d", rc));
+ cFYI(1, "Tree disconnect failed %d", rc);
/* No need to return error on this operation if tid invalidated and
closed on server already e.g. due to tcp session crashing */
@@ -691,7 +696,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
LOGOFF_ANDX_REQ *pSMB;
int rc = 0;
- cFYI(1, ("In SMBLogoff for session disconnect"));
+ cFYI(1, "In SMBLogoff for session disconnect");
/*
* BB: do we need to check validity of ses and server? They should
@@ -744,7 +749,7 @@ CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In POSIX delete"));
+ cFYI(1, "In POSIX delete");
PsxDelete:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -796,7 +801,7 @@ PsxDelete:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("Posix delete returned %d", rc));
+ cFYI(1, "Posix delete returned %d", rc);
cifs_buf_release(pSMB);
cifs_stats_inc(&tcon->num_deletes);
@@ -843,7 +848,7 @@ DelFileRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_deletes);
if (rc)
- cFYI(1, ("Error in RMFile = %d", rc));
+ cFYI(1, "Error in RMFile = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -862,7 +867,7 @@ CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
int bytes_returned;
int name_len;
- cFYI(1, ("In CIFSSMBRmDir"));
+ cFYI(1, "In CIFSSMBRmDir");
RmDirRetry:
rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -887,7 +892,7 @@ RmDirRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_rmdirs);
if (rc)
- cFYI(1, ("Error in RMDir = %d", rc));
+ cFYI(1, "Error in RMDir = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -905,7 +910,7 @@ CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
int name_len;
- cFYI(1, ("In CIFSSMBMkDir"));
+ cFYI(1, "In CIFSSMBMkDir");
MkDirRetry:
rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -930,7 +935,7 @@ MkDirRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_mkdirs);
if (rc)
- cFYI(1, ("Error in Mkdir = %d", rc));
+ cFYI(1, "Error in Mkdir = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -953,7 +958,7 @@ CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
OPEN_PSX_REQ *pdata;
OPEN_PSX_RSP *psx_rsp;
- cFYI(1, ("In POSIX Create"));
+ cFYI(1, "In POSIX Create");
PsxCreat:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -1007,11 +1012,11 @@ PsxCreat:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Posix create returned %d", rc));
+ cFYI(1, "Posix create returned %d", rc);
goto psx_create_err;
}
- cFYI(1, ("copying inode info"));
+ cFYI(1, "copying inode info");
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP))) {
@@ -1033,11 +1038,11 @@ PsxCreat:
/* check to make sure response data is there */
if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
pRetData->Type = cpu_to_le32(-1); /* unknown */
- cFYI(DBG2, ("unknown type"));
+ cFYI(DBG2, "unknown type");
} else {
if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP)
+ sizeof(FILE_UNIX_BASIC_INFO)) {
- cERROR(1, ("Open response data too small"));
+ cERROR(1, "Open response data too small");
pRetData->Type = cpu_to_le32(-1);
goto psx_create_err;
}
@@ -1084,7 +1089,7 @@ static __u16 convert_disposition(int disposition)
ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
break;
default:
- cFYI(1, ("unknown disposition %d", disposition));
+ cFYI(1, "unknown disposition %d", disposition);
ofun = SMBOPEN_OAPPEND; /* regular open */
}
return ofun;
@@ -1175,7 +1180,7 @@ OldOpenRetry:
(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
- cFYI(1, ("Error in Open = %d", rc));
+ cFYI(1, "Error in Open = %d", rc);
} else {
/* BB verify if wct == 15 */
@@ -1288,7 +1293,7 @@ openRetry:
(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
- cFYI(1, ("Error in Open = %d", rc));
+ cFYI(1, "Error in Open = %d", rc);
} else {
*pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */
*netfid = pSMBr->Fid; /* cifs fid stays in le */
@@ -1326,7 +1331,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
int resp_buf_type = 0;
struct kvec iov[1];
- cFYI(1, ("Reading %d bytes on fid %d", count, netfid));
+ cFYI(1, "Reading %d bytes on fid %d", count, netfid);
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else {
@@ -1371,7 +1376,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
cifs_stats_inc(&tcon->num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
- cERROR(1, ("Send error in read = %d", rc));
+ cERROR(1, "Send error in read = %d", rc);
} else {
int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
data_length = data_length << 16;
@@ -1381,15 +1386,15 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
/*check that DataLength would not go beyond end of SMB */
if ((data_length > CIFSMaxBufSize)
|| (data_length > count)) {
- cFYI(1, ("bad length %d for count %d",
- data_length, count));
+ cFYI(1, "bad length %d for count %d",
+ data_length, count);
rc = -EIO;
*nbytes = 0;
} else {
pReadData = (char *) (&pSMBr->hdr.Protocol) +
le16_to_cpu(pSMBr->DataOffset);
/* if (rc = copy_to_user(buf, pReadData, data_length)) {
- cERROR(1,("Faulting on read rc = %d",rc));
+ cERROR(1, "Faulting on read rc = %d",rc);
rc = -EFAULT;
}*/ /* can not use copy_to_user when using page cache*/
if (*buf)
@@ -1433,7 +1438,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
*nbytes = 0;
- /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
+ /* cFYI(1, "write at %lld %d bytes", offset, count);*/
if (tcon->ses == NULL)
return -ECONNABORTED;
@@ -1514,7 +1519,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
(struct smb_hdr *) pSMBr, &bytes_returned, long_op);
cifs_stats_inc(&tcon->num_writes);
if (rc) {
- cFYI(1, ("Send error in write = %d", rc));
+ cFYI(1, "Send error in write = %d", rc);
} else {
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
@@ -1551,7 +1556,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
*nbytes = 0;
- cFYI(1, ("write2 at %lld %d bytes", (long long)offset, count));
+ cFYI(1, "write2 at %lld %d bytes", (long long)offset, count);
if (tcon->ses->capabilities & CAP_LARGE_FILES) {
wct = 14;
@@ -1606,7 +1611,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
long_op);
cifs_stats_inc(&tcon->num_writes);
if (rc) {
- cFYI(1, ("Send error Write2 = %d", rc));
+ cFYI(1, "Send error Write2 = %d", rc);
} else if (resp_buf_type == 0) {
/* presumably this can not happen, but best to be safe */
rc = -EIO;
@@ -1651,7 +1656,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
int timeout = 0;
__u16 count;
- cFYI(1, ("CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock));
+ cFYI(1, "CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock);
rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
if (rc)
@@ -1699,7 +1704,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
}
cifs_stats_inc(&tcon->num_locks);
if (rc)
- cFYI(1, ("Send error in Lock = %d", rc));
+ cFYI(1, "Send error in Lock = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -1722,7 +1727,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
__u16 params, param_offset, offset, byte_count, count;
struct kvec iov[1];
- cFYI(1, ("Posix Lock"));
+ cFYI(1, "Posix Lock");
if (pLockData == NULL)
return -EINVAL;
@@ -1792,7 +1797,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
}
if (rc) {
- cFYI(1, ("Send error in Posix Lock = %d", rc));
+ cFYI(1, "Send error in Posix Lock = %d", rc);
} else if (get_flag) {
/* lock structure can be returned on get */
__u16 data_offset;
@@ -1849,7 +1854,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
{
int rc = 0;
CLOSE_REQ *pSMB = NULL;
- cFYI(1, ("In CIFSSMBClose"));
+ cFYI(1, "In CIFSSMBClose");
/* do not retry on dead session on close */
rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
@@ -1866,7 +1871,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
if (rc) {
if (rc != -EINTR) {
/* EINTR is expected when user ctl-c to kill app */
- cERROR(1, ("Send error in Close = %d", rc));
+ cERROR(1, "Send error in Close = %d", rc);
}
}
@@ -1882,7 +1887,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
{
int rc = 0;
FLUSH_REQ *pSMB = NULL;
- cFYI(1, ("In CIFSSMBFlush"));
+ cFYI(1, "In CIFSSMBFlush");
rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
if (rc)
@@ -1893,7 +1898,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
cifs_stats_inc(&tcon->num_flushes);
if (rc)
- cERROR(1, ("Send error in Flush = %d", rc));
+ cERROR(1, "Send error in Flush = %d", rc);
return rc;
}
@@ -1910,7 +1915,7 @@ CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
int name_len, name_len2;
__u16 count;
- cFYI(1, ("In CIFSSMBRename"));
+ cFYI(1, "In CIFSSMBRename");
renameRetry:
rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -1956,7 +1961,7 @@ renameRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_renames);
if (rc)
- cFYI(1, ("Send error in rename = %d", rc));
+ cFYI(1, "Send error in rename = %d", rc);
cifs_buf_release(pSMB);
@@ -1980,7 +1985,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
int len_of_str;
__u16 params, param_offset, offset, count, byte_count;
- cFYI(1, ("Rename to File by handle"));
+ cFYI(1, "Rename to File by handle");
rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
@@ -2035,7 +2040,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&pTcon->num_t2renames);
if (rc)
- cFYI(1, ("Send error in Rename (by file handle) = %d", rc));
+ cFYI(1, "Send error in Rename (by file handle) = %d", rc);
cifs_buf_release(pSMB);
@@ -2057,7 +2062,7 @@ CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName,
int name_len, name_len2;
__u16 count;
- cFYI(1, ("In CIFSSMBCopy"));
+ cFYI(1, "In CIFSSMBCopy");
copyRetry:
rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2102,8 +2107,8 @@ copyRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in copy = %d with %d files copied",
- rc, le16_to_cpu(pSMBr->CopyCount)));
+ cFYI(1, "Send error in copy = %d with %d files copied",
+ rc, le16_to_cpu(pSMBr->CopyCount));
}
cifs_buf_release(pSMB);
@@ -2127,7 +2132,7 @@ CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In Symlink Unix style"));
+ cFYI(1, "In Symlink Unix style");
createSymLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2192,7 +2197,7 @@ createSymLinkRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_symlinks);
if (rc)
- cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc));
+ cFYI(1, "Send error in SetPathInfo create symlink = %d", rc);
cifs_buf_release(pSMB);
@@ -2216,7 +2221,7 @@ CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In Create Hard link Unix style"));
+ cFYI(1, "In Create Hard link Unix style");
createHardLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2278,7 +2283,7 @@ createHardLinkRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
if (rc)
- cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc));
+ cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -2299,7 +2304,7 @@ CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon,
int name_len, name_len2;
__u16 count;
- cFYI(1, ("In CIFSCreateHardLink"));
+ cFYI(1, "In CIFSCreateHardLink");
winCreateHardLinkRetry:
rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
@@ -2350,7 +2355,7 @@ winCreateHardLinkRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
if (rc)
- cFYI(1, ("Send error in hard link (NT rename) = %d", rc));
+ cFYI(1, "Send error in hard link (NT rename) = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -2373,7 +2378,7 @@ CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon,
__u16 params, byte_count;
char *data_start;
- cFYI(1, ("In QPathSymLinkInfo (Unix) for path %s", searchName));
+ cFYI(1, "In QPathSymLinkInfo (Unix) for path %s", searchName);
querySymLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -2420,7 +2425,7 @@ querySymLinkRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QuerySymLinkInfo = %d", rc));
+ cFYI(1, "Send error in QuerySymLinkInfo = %d", rc);
} else {
/* decode response */
@@ -2521,21 +2526,21 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
/* should we also check that parm and data areas do not overlap? */
if (*ppparm > end_of_smb) {
- cFYI(1, ("parms start after end of smb"));
+ cFYI(1, "parms start after end of smb");
return -EINVAL;
} else if (parm_count + *ppparm > end_of_smb) {
- cFYI(1, ("parm end after end of smb"));
+ cFYI(1, "parm end after end of smb");
return -EINVAL;
} else if (*ppdata > end_of_smb) {
- cFYI(1, ("data starts after end of smb"));
+ cFYI(1, "data starts after end of smb");
return -EINVAL;
} else if (data_count + *ppdata > end_of_smb) {
- cFYI(1, ("data %p + count %d (%p) ends after end of smb %p start %p",
+ cFYI(1, "data %p + count %d (%p) past smb end %p start %p",
*ppdata, data_count, (data_count + *ppdata),
- end_of_smb, pSMBr));
+ end_of_smb, pSMBr);
return -EINVAL;
} else if (parm_count + data_count > pSMBr->ByteCount) {
- cFYI(1, ("parm count and data count larger than SMB"));
+ cFYI(1, "parm count and data count larger than SMB");
return -EINVAL;
}
*pdatalen = data_count;
@@ -2554,7 +2559,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
struct smb_com_transaction_ioctl_req *pSMB;
struct smb_com_transaction_ioctl_rsp *pSMBr;
- cFYI(1, ("In Windows reparse style QueryLink for path %s", searchName));
+ cFYI(1, "In Windows reparse style QueryLink for path %s", searchName);
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
@@ -2583,7 +2588,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QueryReparseLinkInfo = %d", rc));
+ cFYI(1, "Send error in QueryReparseLinkInfo = %d", rc);
} else { /* decode response */
__u32 data_offset = le32_to_cpu(pSMBr->DataOffset);
__u32 data_count = le32_to_cpu(pSMBr->DataCount);
@@ -2607,7 +2612,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
if ((reparse_buf->LinkNamesBuf +
reparse_buf->TargetNameOffset +
reparse_buf->TargetNameLen) > end_of_smb) {
- cFYI(1, ("reparse buf beyond SMB"));
+ cFYI(1, "reparse buf beyond SMB");
rc = -EIO;
goto qreparse_out;
}
@@ -2628,12 +2633,12 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
}
} else {
rc = -EIO;
- cFYI(1, ("Invalid return data count on "
- "get reparse info ioctl"));
+ cFYI(1, "Invalid return data count on "
+ "get reparse info ioctl");
}
symlinkinfo[buflen] = 0; /* just in case so the caller
does not go off the end of the buffer */
- cFYI(1, ("readlink result - %s", symlinkinfo));
+ cFYI(1, "readlink result - %s", symlinkinfo);
}
qreparse_out:
@@ -2656,7 +2661,7 @@ static void cifs_convert_ace(posix_acl_xattr_entry *ace,
ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag);
ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
- /* cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id)); */
+ /* cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id); */
return;
}
@@ -2682,8 +2687,8 @@ static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
size += sizeof(struct cifs_posix_ace) * count;
/* check if we would go beyond end of SMB */
if (size_of_data_area < size) {
- cFYI(1, ("bad CIFS POSIX ACL size %d vs. %d",
- size_of_data_area, size));
+ cFYI(1, "bad CIFS POSIX ACL size %d vs. %d",
+ size_of_data_area, size);
return -EINVAL;
}
} else if (acl_type & ACL_TYPE_DEFAULT) {
@@ -2730,7 +2735,7 @@ static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
cifs_ace->cifs_uid = cpu_to_le64(-1);
} else
cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
- /*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/
+ /*cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id);*/
return rc;
}
@@ -2748,12 +2753,12 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
return 0;
count = posix_acl_xattr_count((size_t)buflen);
- cFYI(1, ("setting acl with %d entries from buf of length %d and "
+ cFYI(1, "setting acl with %d entries from buf of length %d and "
"version of %d",
- count, buflen, le32_to_cpu(local_acl->a_version)));
+ count, buflen, le32_to_cpu(local_acl->a_version));
if (le32_to_cpu(local_acl->a_version) != 2) {
- cFYI(1, ("unknown POSIX ACL version %d",
- le32_to_cpu(local_acl->a_version)));
+ cFYI(1, "unknown POSIX ACL version %d",
+ le32_to_cpu(local_acl->a_version));
return 0;
}
cifs_acl->version = cpu_to_le16(1);
@@ -2762,7 +2767,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
else if (acl_type == ACL_TYPE_DEFAULT)
cifs_acl->default_entry_count = cpu_to_le16(count);
else {
- cFYI(1, ("unknown ACL type %d", acl_type));
+ cFYI(1, "unknown ACL type %d", acl_type);
return 0;
}
for (i = 0; i < count; i++) {
@@ -2795,7 +2800,7 @@ CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
- cFYI(1, ("In GetPosixACL (Unix) for path %s", searchName));
+ cFYI(1, "In GetPosixACL (Unix) for path %s", searchName);
queryAclRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -2847,7 +2852,7 @@ queryAclRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
- cFYI(1, ("Send error in Query POSIX ACL = %d", rc));
+ cFYI(1, "Send error in Query POSIX ACL = %d", rc);
} else {
/* decode response */
@@ -2884,7 +2889,7 @@ CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, byte_count, data_count, param_offset, offset;
- cFYI(1, ("In SetPosixACL (Unix) for path %s", fileName));
+ cFYI(1, "In SetPosixACL (Unix) for path %s", fileName);
setAclRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2939,7 +2944,7 @@ setAclRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("Set POSIX ACL returned %d", rc));
+ cFYI(1, "Set POSIX ACL returned %d", rc);
setACLerrorExit:
cifs_buf_release(pSMB);
@@ -2959,7 +2964,7 @@ CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
__u16 params, byte_count;
- cFYI(1, ("In GetExtAttr"));
+ cFYI(1, "In GetExtAttr");
if (tcon == NULL)
return -ENODEV;
@@ -2998,7 +3003,7 @@ GetExtAttrRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("error %d in GetExtAttr", rc));
+ cFYI(1, "error %d in GetExtAttr", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3013,7 +3018,7 @@ GetExtAttrRetry:
struct file_chattr_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count != 16) {
- cFYI(1, ("Illegal size ret in GetExtAttr"));
+ cFYI(1, "Illegal size ret in GetExtAttr");
rc = -EIO;
goto GetExtAttrOut;
}
@@ -3043,7 +3048,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
QUERY_SEC_DESC_REQ *pSMB;
struct kvec iov[1];
- cFYI(1, ("GetCifsACL"));
+ cFYI(1, "GetCifsACL");
*pbuflen = 0;
*acl_inf = NULL;
@@ -3068,7 +3073,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
CIFS_STD_OP);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
- cFYI(1, ("Send error in QuerySecDesc = %d", rc));
+ cFYI(1, "Send error in QuerySecDesc = %d", rc);
} else { /* decode response */
__le32 *parm;
__u32 parm_len;
@@ -3083,7 +3088,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
goto qsec_out;
pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;
- cFYI(1, ("smb %p parm %p data %p", pSMBr, parm, *acl_inf));
+ cFYI(1, "smb %p parm %p data %p", pSMBr, parm, *acl_inf);
if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
rc = -EIO; /* bad smb */
@@ -3095,8 +3100,8 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
acl_len = le32_to_cpu(*parm);
if (acl_len != *pbuflen) {
- cERROR(1, ("acl length %d does not match %d",
- acl_len, *pbuflen));
+ cERROR(1, "acl length %d does not match %d",
+ acl_len, *pbuflen);
if (*pbuflen > acl_len)
*pbuflen = acl_len;
}
@@ -3105,7 +3110,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
header followed by the smallest SID */
if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
(*pbuflen >= 64 * 1024)) {
- cERROR(1, ("bad acl length %d", *pbuflen));
+ cERROR(1, "bad acl length %d", *pbuflen);
rc = -EINVAL;
*pbuflen = 0;
} else {
@@ -3179,9 +3184,9 @@ setCifsAclRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cFYI(1, ("SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc));
+ cFYI(1, "SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc);
if (rc)
- cFYI(1, ("Set CIFS ACL returned %d", rc));
+ cFYI(1, "Set CIFS ACL returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -3205,7 +3210,7 @@ int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
int name_len;
- cFYI(1, ("In SMBQPath path %s", searchName));
+ cFYI(1, "In SMBQPath path %s", searchName);
QInfRetry:
rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -3231,7 +3236,7 @@ QInfRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QueryInfo = %d", rc));
+ cFYI(1, "Send error in QueryInfo = %d", rc);
} else if (pFinfo) {
struct timespec ts;
__u32 time = le32_to_cpu(pSMBr->last_write_time);
@@ -3305,7 +3310,7 @@ QFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3343,7 +3348,7 @@ CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
-/* cFYI(1, ("In QPathInfo path %s", searchName)); */
+/* cFYI(1, "In QPathInfo path %s", searchName); */
QPathInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -3393,7 +3398,7 @@ QPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3473,14 +3478,14 @@ UnixQFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) {
- cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n"
+ cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
"Unix Extensions can be disabled on mount "
- "by specifying the nosfu mount option."));
+ "by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
@@ -3512,7 +3517,7 @@ CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
- cFYI(1, ("In QPathInfo (Unix) the path %s", searchName));
+ cFYI(1, "In QPathInfo (Unix) the path %s", searchName);
UnixQPathInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -3559,14 +3564,14 @@ UnixQPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) {
- cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n"
+ cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
"Unix Extensions can be disabled on mount "
- "by specifying the nosfu mount option."));
+ "by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
@@ -3600,7 +3605,7 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
- cFYI(1, ("In FindFirst for %s", searchName));
+ cFYI(1, "In FindFirst for %s", searchName);
findFirstRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -3677,7 +3682,7 @@ findFirstRetry:
if (rc) {/* BB add logic to retry regular search if Unix search
rejected unexpectedly by server */
/* BB Add code to handle unsupported level rc */
- cFYI(1, ("Error in FindFirst = %d", rc));
+ cFYI(1, "Error in FindFirst = %d", rc);
cifs_buf_release(pSMB);
@@ -3716,7 +3721,7 @@ findFirstRetry:
lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
lnoff) {
- cERROR(1, ("ignoring corrupt resume name"));
+ cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL;
return rc;
}
@@ -3744,7 +3749,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
int bytes_returned, name_len;
__u16 params, byte_count;
- cFYI(1, ("In FindNext"));
+ cFYI(1, "In FindNext");
if (psrch_inf->endOfSearch)
return -ENOENT;
@@ -3808,7 +3813,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
cifs_buf_release(pSMB);
rc = 0; /* search probably was closed at end of search*/
} else
- cFYI(1, ("FindNext returned = %d", rc));
+ cFYI(1, "FindNext returned = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3844,15 +3849,15 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
lnoff) {
- cERROR(1, ("ignoring corrupt resume name"));
+ cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL;
return rc;
} else
psrch_inf->last_entry =
psrch_inf->srch_entries_start + lnoff;
-/* cFYI(1,("fnxt2 entries in buf %d index_of_last %d",
- psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry)); */
+/* cFYI(1, "fnxt2 entries in buf %d index_of_last %d",
+ psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
/* BB fixme add unlock here */
}
@@ -3877,7 +3882,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
FINDCLOSE_REQ *pSMB = NULL;
- cFYI(1, ("In CIFSSMBFindClose"));
+ cFYI(1, "In CIFSSMBFindClose");
rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
/* no sense returning error if session restarted
@@ -3891,7 +3896,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cERROR(1, ("Send error in FindClose = %d", rc));
+ cERROR(1, "Send error in FindClose = %d", rc);
cifs_stats_inc(&tcon->num_fclose);
@@ -3914,7 +3919,7 @@ CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
int name_len, bytes_returned;
__u16 params, byte_count;
- cFYI(1, ("In GetSrvInodeNum for %s", searchName));
+ cFYI(1, "In GetSrvInodeNum for %s", searchName);
if (tcon == NULL)
return -ENODEV;
@@ -3964,7 +3969,7 @@ GetInodeNumberRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("error %d in QueryInternalInfo", rc));
+ cFYI(1, "error %d in QueryInternalInfo", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3979,7 +3984,7 @@ GetInodeNumberRetry:
struct file_internal_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count < 8) {
- cFYI(1, ("Illegal size ret in QryIntrnlInf"));
+ cFYI(1, "Illegal size ret in QryIntrnlInf");
rc = -EIO;
goto GetInodeNumOut;
}
@@ -4020,16 +4025,16 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
*num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals);
if (*num_of_nodes < 1) {
- cERROR(1, ("num_referrals: must be at least > 0,"
- "but we get num_referrals = %d\n", *num_of_nodes));
+ cERROR(1, "num_referrals: must be at least > 0,"
+ "but we get num_referrals = %d\n", *num_of_nodes);
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals);
if (ref->VersionNumber != cpu_to_le16(3)) {
- cERROR(1, ("Referrals of V%d version are not supported,"
- "should be V3", le16_to_cpu(ref->VersionNumber)));
+ cERROR(1, "Referrals of V%d version are not supported,"
+ "should be V3", le16_to_cpu(ref->VersionNumber));
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
@@ -4038,14 +4043,14 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
data_end = (char *)(&(pSMBr->PathConsumed)) +
le16_to_cpu(pSMBr->t2.DataCount);
- cFYI(1, ("num_referrals: %d dfs flags: 0x%x ... \n",
+ cFYI(1, "num_referrals: %d dfs flags: 0x%x ...\n",
*num_of_nodes,
- le32_to_cpu(pSMBr->DFSFlags)));
+ le32_to_cpu(pSMBr->DFSFlags));
*target_nodes = kzalloc(sizeof(struct dfs_info3_param) *
*num_of_nodes, GFP_KERNEL);
if (*target_nodes == NULL) {
- cERROR(1, ("Failed to allocate buffer for target_nodes\n"));
+ cERROR(1, "Failed to allocate buffer for target_nodes\n");
rc = -ENOMEM;
goto parse_DFS_referrals_exit;
}
@@ -4121,7 +4126,7 @@ CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
*num_of_nodes = 0;
*target_nodes = NULL;
- cFYI(1, ("In GetDFSRefer the path %s", searchName));
+ cFYI(1, "In GetDFSRefer the path %s", searchName);
if (ses == NULL)
return -ENODEV;
getDFSRetry:
@@ -4188,7 +4193,7 @@ getDFSRetry:
rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in GetDFSRefer = %d", rc));
+ cFYI(1, "Send error in GetDFSRefer = %d", rc);
goto GetDFSRefExit;
}
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4199,9 +4204,9 @@ getDFSRetry:
goto GetDFSRefExit;
}
- cFYI(1, ("Decoding GetDFSRefer response BCC: %d Offset %d",
+ cFYI(1, "Decoding GetDFSRefer response BCC: %d Offset %d",
pSMBr->ByteCount,
- le16_to_cpu(pSMBr->t2.DataOffset)));
+ le16_to_cpu(pSMBr->t2.DataOffset));
/* parse returned result into more usable form */
rc = parse_DFS_referrals(pSMBr, num_of_nodes,
@@ -4229,7 +4234,7 @@ SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("OldQFSInfo"));
+ cFYI(1, "OldQFSInfo");
oldQFSInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4262,7 +4267,7 @@ oldQFSInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSInfo = %d", rc));
+ cFYI(1, "Send error in QFSInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4270,8 +4275,8 @@ oldQFSInfoRetry:
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- cFYI(1, ("qfsinf resp BCC: %d Offset %d",
- pSMBr->ByteCount, data_offset));
+ cFYI(1, "qfsinf resp BCC: %d Offset %d",
+ pSMBr->ByteCount, data_offset);
response_data = (FILE_SYSTEM_ALLOC_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
@@ -4283,11 +4288,10 @@ oldQFSInfoRetry:
le32_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
le32_to_cpu(response_data->FreeAllocationUnits);
- cFYI(1,
- ("Blocks: %lld Free: %lld Block size %ld",
- (unsigned long long)FSData->f_blocks,
- (unsigned long long)FSData->f_bfree,
- FSData->f_bsize));
+ cFYI(1, "Blocks: %lld Free: %lld Block size %ld",
+ (unsigned long long)FSData->f_blocks,
+ (unsigned long long)FSData->f_bfree,
+ FSData->f_bsize);
}
}
cifs_buf_release(pSMB);
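
Both QFSInfo decoders (the legacy SMBOldQFSInfo above and CIFSSMBQFSInfo below) map the server's allocation-unit counts onto the same struct kstatfs fields. Schematically (the f_bsize computation is assumed from the surrounding source; only the f_blocks/f_bfree assignments are visible in these hunks):

	FSData->f_bsize  = BytesPerSector * SectorsPerAllocationUnit;
	FSData->f_blocks = TotalAllocationUnits;	/* 32-bit in the legacy reply, 64-bit otherwise */
	FSData->f_bfree  = FSData->f_bavail = FreeAllocationUnits;
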
@@ -4309,7 +4313,7 @@ CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSInfo"));
+ cFYI(1, "In QFSInfo");
QFSInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4342,7 +4346,7 @@ QFSInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSInfo = %d", rc));
+ cFYI(1, "Send error in QFSInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4363,11 +4367,10 @@ QFSInfoRetry:
le64_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
le64_to_cpu(response_data->FreeAllocationUnits);
- cFYI(1,
- ("Blocks: %lld Free: %lld Block size %ld",
- (unsigned long long)FSData->f_blocks,
- (unsigned long long)FSData->f_bfree,
- FSData->f_bsize));
+ cFYI(1, "Blocks: %lld Free: %lld Block size %ld",
+ (unsigned long long)FSData->f_blocks,
+ (unsigned long long)FSData->f_bfree,
+ FSData->f_bsize);
}
}
cifs_buf_release(pSMB);
@@ -4389,7 +4392,7 @@ CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSAttributeInfo"));
+ cFYI(1, "In QFSAttributeInfo");
QFSAttributeRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4423,7 +4426,7 @@ QFSAttributeRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cERROR(1, ("Send error in QFSAttributeInfo = %d", rc));
+ cERROR(1, "Send error in QFSAttributeInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4459,7 +4462,7 @@ CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSDeviceInfo"));
+ cFYI(1, "In QFSDeviceInfo");
QFSDeviceRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4494,7 +4497,7 @@ QFSDeviceRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSDeviceInfo = %d", rc));
+ cFYI(1, "Send error in QFSDeviceInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4529,7 +4532,7 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSUnixInfo"));
+ cFYI(1, "In QFSUnixInfo");
QFSUnixRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4563,7 +4566,7 @@ QFSUnixRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cERROR(1, ("Send error in QFSUnixInfo = %d", rc));
+ cERROR(1, "Send error in QFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4598,7 +4601,7 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In SETFSUnixInfo"));
+ cFYI(1, "In SETFSUnixInfo");
SETFSUnixRetry:
/* BB switch to small buf init to save memory */
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -4646,7 +4649,7 @@ SETFSUnixRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cERROR(1, ("Send error in SETFSUnixInfo = %d", rc));
+ cERROR(1, "Send error in SETFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc)
@@ -4674,7 +4677,7 @@ CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSPosixInfo"));
+ cFYI(1, "In QFSPosixInfo");
QFSPosixRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4708,7 +4711,7 @@ QFSPosixRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSUnixInfo = %d", rc));
+ cFYI(1, "Send error in QFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4768,7 +4771,7 @@ CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
int bytes_returned = 0;
__u16 params, byte_count, data_count, param_offset, offset;
- cFYI(1, ("In SetEOF"));
+ cFYI(1, "In SetEOF");
SetEOFRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4834,7 +4837,7 @@ SetEOFRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (file size) returned %d", rc));
+ cFYI(1, "SetPathInfo (file size) returned %d", rc);
cifs_buf_release(pSMB);
@@ -4854,8 +4857,8 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("SetFileSize (via SetFileInfo) %lld",
- (long long)size));
+ cFYI(1, "SetFileSize (via SetFileInfo) %lld",
+ (long long)size);
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -4914,9 +4917,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc) {
- cFYI(1,
- ("Send error in SetFileInfo (SetFileSize) = %d",
- rc));
+ cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc);
}
/* Note: On -EAGAIN error only caller can retry on handle based calls
@@ -4940,7 +4941,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("Set Times (via SetFileInfo)"));
+ cFYI(1, "Set Times (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -4985,7 +4986,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
+ cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -5002,7 +5003,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("Set File Disposition (via SetFileInfo)"));
+ cFYI(1, "Set File Disposition (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -5044,7 +5045,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
*data_offset = delete_file ? 1 : 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cFYI(1, ("Send error in SetFileDisposition = %d", rc));
+ cFYI(1, "Send error in SetFileDisposition = %d", rc);
return rc;
}
@@ -5062,7 +5063,7 @@ CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
char *data_offset;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("In SetTimes"));
+ cFYI(1, "In SetTimes");
SetTimesRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -5118,7 +5119,7 @@ SetTimesRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (times) returned %d", rc));
+ cFYI(1, "SetPathInfo (times) returned %d", rc);
cifs_buf_release(pSMB);
@@ -5143,7 +5144,7 @@ CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
int bytes_returned;
int name_len;
- cFYI(1, ("In SetAttrLegacy"));
+ cFYI(1, "In SetAttrLegacy");
SetAttrLgcyRetry:
rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB,
@@ -5169,7 +5170,7 @@ SetAttrLgcyRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("Error in LegacySetAttr = %d", rc));
+ cFYI(1, "Error in LegacySetAttr = %d", rc);
cifs_buf_release(pSMB);
@@ -5231,7 +5232,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("Set Unix Info (via SetFileInfo)"));
+ cFYI(1, "Set Unix Info (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -5276,7 +5277,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
+ cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -5297,7 +5298,7 @@ CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
FILE_UNIX_BASIC_INFO *data_offset;
__u16 params, param_offset, offset, count, byte_count;
- cFYI(1, ("In SetUID/GID/Mode"));
+ cFYI(1, "In SetUID/GID/Mode");
setPermsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -5353,7 +5354,7 @@ setPermsRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (perms) returned %d", rc));
+ cFYI(1, "SetPathInfo (perms) returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -5372,7 +5373,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
struct dir_notify_req *dnotify_req;
int bytes_returned;
- cFYI(1, ("In CIFSSMBNotify for file handle %d", (int)netfid));
+ cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid);
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
@@ -5406,7 +5407,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
(struct smb_hdr *)pSMBr, &bytes_returned,
CIFS_ASYNC_OP);
if (rc) {
- cFYI(1, ("Error in Notify = %d", rc));
+ cFYI(1, "Error in Notify = %d", rc);
} else {
/* Add file to outstanding requests */
/* BB change to kmem cache alloc */
@@ -5462,7 +5463,7 @@ CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
char *end_of_smb;
__u16 params, byte_count, data_offset;
- cFYI(1, ("In Query All EAs path %s", searchName));
+ cFYI(1, "In Query All EAs path %s", searchName);
QAllEAsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -5509,7 +5510,7 @@ QAllEAsRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QueryAllEAs = %d", rc));
+ cFYI(1, "Send error in QueryAllEAs = %d", rc);
goto QAllEAsOut;
}
@@ -5537,16 +5538,16 @@ QAllEAsRetry:
(((char *) &pSMBr->hdr.Protocol) + data_offset);
list_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", list_len));
+ cFYI(1, "ea length %d", list_len);
if (list_len <= 8) {
- cFYI(1, ("empty EA list returned from server"));
+ cFYI(1, "empty EA list returned from server");
goto QAllEAsOut;
}
/* make sure list_len doesn't go past end of SMB */
end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
if ((char *)ea_response_data + list_len > end_of_smb) {
- cFYI(1, ("EA list appears to go beyond SMB"));
+ cFYI(1, "EA list appears to go beyond SMB");
rc = -EIO;
goto QAllEAsOut;
}
@@ -5563,7 +5564,7 @@ QAllEAsRetry:
temp_ptr += 4;
/* make sure we can read name_len and value_len */
if (list_len < 0) {
- cFYI(1, ("EA entry goes beyond length of list"));
+ cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
@@ -5572,7 +5573,7 @@ QAllEAsRetry:
value_len = le16_to_cpu(temp_fea->value_len);
list_len -= name_len + 1 + value_len;
if (list_len < 0) {
- cFYI(1, ("EA entry goes beyond length of list"));
+ cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
@@ -5639,7 +5640,7 @@ CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
int bytes_returned = 0;
__u16 params, param_offset, byte_count, offset, count;
- cFYI(1, ("In SetEA"));
+ cFYI(1, "In SetEA");
SetEARetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -5721,7 +5722,7 @@ SetEARetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (EA) returned %d", rc));
+ cFYI(1, "SetPathInfo (EA) returned %d", rc);
cifs_buf_release(pSMB);
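
None of the hunks above change what is printed: the conversion from cFYI(1, ("fmt", args)) to cFYI(1, "fmt", args) works because cFYI() and cERROR() become variadic macros that paste the trailing newline onto the format string themselves. A minimal sketch of the post-change shape, assuming the existing cifsFYI/CIFS_INFO debug toggles (the authoritative definitions live in fs/cifs/cifs_debug.h, which this series also updates per the diffstat):

	/* sketch only; see fs/cifs/cifs_debug.h for the real definitions */
	#ifdef CONFIG_CIFS_DEBUG
	#define cFYI(set, fmt, arg...)						\
	do {									\
		if ((set) && (cifsFYI & CIFS_INFO))				\
			printk(KERN_DEBUG "CIFS FYI: " fmt "\n", ##arg);	\
	} while (0)
	#define cERROR(set, fmt, arg...)					\
	do {									\
		if (set)							\
			printk(KERN_ERR "CIFS VFS: " fmt "\n", ##arg);		\
	} while (0)
	#else
	#define cFYI(set, fmt, arg...)		do {} while (0)
	#define cERROR(set, fmt, arg...)	do {} while (0)
	#endif

With gcc's ", ##arg" extension the comma disappears when no arguments follow the format, so cFYI(1, "In FindNext") and cFYI(1, "error %d", rc) both expand cleanly.
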
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d9566bf..2208f06 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -102,6 +102,7 @@ struct smb_vol {
bool sockopt_tcp_nodelay:1;
unsigned short int port;
char *prepath;
+ struct nls_table *local_nls;
};
static int ipv4_connect(struct TCP_Server_Info *server);
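
The new local_nls member moves knowledge of the mount's charset table into struct smb_vol, so the session helpers added below can reach it without going through a cifs_sb. The flow, pieced together from the cifs_mount() hunk near the end of this file (sketch; error handling elided):

	/* in cifs_mount(), before any session setup */
	volume_info->local_nls = load_nls(volume_info->iocharset);
	/* or load_nls_default() when no iocharset= option was given */
	cifs_sb->local_nls = volume_info->local_nls;

	/* later, inside cifs_get_smb_ses() */
	rc = cifs_setup_session(xid, ses, volume_info->local_nls);
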
@@ -135,7 +136,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
- cFYI(1, ("Reconnecting tcp session"));
+ cFYI(1, "Reconnecting tcp session");
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
@@ -153,12 +154,12 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* do not want to be sending data on a socket we are freeing */
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
- cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state,
- server->ssocket->flags));
+ cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
+ server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
- cFYI(1, ("Post shutdown state: 0x%x Flags: 0x%lx",
+ cFYI(1, "Post shutdown state: 0x%x Flags: 0x%lx",
server->ssocket->state,
- server->ssocket->flags));
+ server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
@@ -187,7 +188,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
else
rc = ipv4_connect(server);
if (rc) {
- cFYI(1, ("reconnect error %d", rc));
+ cFYI(1, "reconnect error %d", rc);
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
@@ -223,7 +224,7 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
/* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
- cFYI(1, ("invalid transact2 word count"));
+ cFYI(1, "invalid transact2 word count");
return -EINVAL;
}
@@ -237,15 +238,15 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
if (remaining == 0)
return 0;
else if (remaining < 0) {
- cFYI(1, ("total data %d smaller than data in frame %d",
- total_data_size, data_in_this_rsp));
+ cFYI(1, "total data %d smaller than data in frame %d",
+ total_data_size, data_in_this_rsp);
return -EINVAL;
} else {
- cFYI(1, ("missing %d bytes from transact2, check next response",
- remaining));
+ cFYI(1, "missing %d bytes from transact2, check next response",
+ remaining);
if (total_data_size > maxBufSize) {
- cERROR(1, ("TotalDataSize %d is over maximum buffer %d",
- total_data_size, maxBufSize));
+ cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+ total_data_size, maxBufSize);
return -EINVAL;
}
return remaining;
@@ -267,7 +268,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
- cFYI(1, ("total data size of primary and secondary t2 differ"));
+ cFYI(1, "total data size of primary and secondary t2 differ");
}
total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
@@ -282,7 +283,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
if (remaining < total_in_buf2) {
- cFYI(1, ("transact2 2nd response contains too much data"));
+ cFYI(1, "transact2 2nd response contains too much data");
}
/* find end of first SMB data area */
@@ -311,7 +312,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
pTargetSMB->smb_buf_length = byte_count;
if (remaining == total_in_buf2) {
- cFYI(1, ("found the last secondary response"));
+ cFYI(1, "found the last secondary response");
return 0; /* we are done */
} else /* more responses to go */
return 1;
@@ -339,7 +340,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
int reconnect;
current->flags |= PF_MEMALLOC;
- cFYI(1, ("Demultiplex PID: %d", task_pid_nr(current)));
+ cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
length = atomic_inc_return(&tcpSesAllocCount);
if (length > 1)
@@ -353,7 +354,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
if (bigbuf == NULL) {
bigbuf = cifs_buf_get();
if (!bigbuf) {
- cERROR(1, ("No memory for large SMB response"));
+ cERROR(1, "No memory for large SMB response");
msleep(3000);
/* retry will check if exiting */
continue;
@@ -366,7 +367,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
if (smallbuf == NULL) {
smallbuf = cifs_small_buf_get();
if (!smallbuf) {
- cERROR(1, ("No memory for SMB response"));
+ cERROR(1, "No memory for SMB response");
msleep(1000);
/* retry will check if exiting */
continue;
@@ -391,9 +392,9 @@ incomplete_rcv:
if (server->tcpStatus == CifsExiting) {
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
- cFYI(1, ("Reconnect after server stopped responding"));
+ cFYI(1, "Reconnect after server stopped responding");
cifs_reconnect(server);
- cFYI(1, ("call to reconnect done"));
+ cFYI(1, "call to reconnect done");
csocket = server->ssocket;
continue;
} else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
@@ -411,7 +412,7 @@ incomplete_rcv:
continue;
} else if (length <= 0) {
if (server->tcpStatus == CifsNew) {
- cFYI(1, ("tcp session abend after SMBnegprot"));
+ cFYI(1, "tcp session abend after SMBnegprot");
/* some servers kill the TCP session rather than
returning an SMB negprot error, in which
case reconnecting here is not going to help,
@@ -419,18 +420,18 @@ incomplete_rcv:
break;
}
if (!try_to_freeze() && (length == -EINTR)) {
- cFYI(1, ("cifsd thread killed"));
+ cFYI(1, "cifsd thread killed");
break;
}
- cFYI(1, ("Reconnect after unexpected peek error %d",
- length));
+ cFYI(1, "Reconnect after unexpected peek error %d",
+ length);
cifs_reconnect(server);
csocket = server->ssocket;
wake_up(&server->response_q);
continue;
} else if (length < pdu_length) {
- cFYI(1, ("requested %d bytes but only got %d bytes",
- pdu_length, length));
+ cFYI(1, "requested %d bytes but only got %d bytes",
+ pdu_length, length);
pdu_length -= length;
msleep(1);
goto incomplete_rcv;
@@ -450,18 +451,18 @@ incomplete_rcv:
pdu_length = be32_to_cpu((__force __be32)smb_buffer->smb_buf_length);
smb_buffer->smb_buf_length = pdu_length;
- cFYI(1, ("rfc1002 length 0x%x", pdu_length+4));
+ cFYI(1, "rfc1002 length 0x%x", pdu_length+4);
if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
continue;
} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
- cFYI(1, ("Good RFC 1002 session rsp"));
+ cFYI(1, "Good RFC 1002 session rsp");
continue;
} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
/* we get this from Windows 98 instead of
an error on SMB negprot response */
- cFYI(1, ("Negative RFC1002 Session Response Error 0x%x)",
- pdu_length));
+ cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
+ pdu_length);
if (server->tcpStatus == CifsNew) {
/* if nack on negprot (rather than
ret of smb negprot error) reconnecting
@@ -484,7 +485,7 @@ incomplete_rcv:
continue;
}
} else if (temp != (char) 0) {
- cERROR(1, ("Unknown RFC 1002 frame"));
+ cERROR(1, "Unknown RFC 1002 frame");
cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
length);
cifs_reconnect(server);
@@ -495,8 +496,8 @@ incomplete_rcv:
/* else we have an SMB response */
if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
(pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
- cERROR(1, ("Invalid size SMB length %d pdu_length %d",
- length, pdu_length+4));
+ cERROR(1, "Invalid size SMB length %d pdu_length %d",
+ length, pdu_length+4);
cifs_reconnect(server);
csocket = server->ssocket;
wake_up(&server->response_q);
@@ -539,8 +540,8 @@ incomplete_rcv:
length = 0;
continue;
} else if (length <= 0) {
- cERROR(1, ("Received no data, expecting %d",
- pdu_length - total_read));
+ cERROR(1, "Received no data, expecting %d",
+ pdu_length - total_read);
cifs_reconnect(server);
csocket = server->ssocket;
reconnect = 1;
@@ -588,7 +589,7 @@ incomplete_rcv:
}
} else {
if (!isLargeBuf) {
- cERROR(1,("1st trans2 resp needs bigbuf"));
+ cERROR(1, "1st trans2 resp needs bigbuf");
/* BB maybe we can fix this up, switch
to already allocated large buffer? */
} else {
@@ -630,8 +631,8 @@ multi_t2_fnd:
wake_up_process(task_to_wake);
} else if (!is_valid_oplock_break(smb_buffer, server) &&
!isMultiRsp) {
- cERROR(1, ("No task to wake, unknown frame received! "
- "NumMids %d", midCount.counter));
+ cERROR(1, "No task to wake, unknown frame received! "
+ "NumMids %d", midCount.counter);
cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
@@ -708,8 +709,8 @@ multi_t2_fnd:
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
- cFYI(1, ("Clearing Mid 0x%x - waking up ",
- mid_entry->mid));
+ cFYI(1, "Clearing Mid 0x%x - waking up ",
+ mid_entry->mid);
task_to_wake = mid_entry->tsk;
if (task_to_wake)
wake_up_process(task_to_wake);
@@ -728,7 +729,7 @@ multi_t2_fnd:
to wait at least 45 seconds before giving up
on a request getting a response and going ahead
and killing cifsd */
- cFYI(1, ("Wait for exit from demultiplex thread"));
+ cFYI(1, "Wait for exit from demultiplex thread");
msleep(46000);
/* if threads still have not exited they are probably never
coming home not much else we can do but free the memory */
@@ -849,7 +850,7 @@ cifs_parse_mount_options(char *options, const char *devname,
separator[0] = options[4];
options += 5;
} else {
- cFYI(1, ("Null separator not allowed"));
+ cFYI(1, "Null separator not allowed");
}
}
@@ -974,7 +975,7 @@ cifs_parse_mount_options(char *options, const char *devname,
}
} else if (strnicmp(data, "sec", 3) == 0) {
if (!value || !*value) {
- cERROR(1, ("no security value specified"));
+ cERROR(1, "no security value specified");
continue;
} else if (strnicmp(value, "krb5i", 5) == 0) {
vol->secFlg |= CIFSSEC_MAY_KRB5 |
@@ -982,7 +983,7 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(value, "krb5p", 5) == 0) {
/* vol->secFlg |= CIFSSEC_MUST_SEAL |
CIFSSEC_MAY_KRB5; */
- cERROR(1, ("Krb5 cifs privacy not supported"));
+ cERROR(1, "Krb5 cifs privacy not supported");
return 1;
} else if (strnicmp(value, "krb5", 4) == 0) {
vol->secFlg |= CIFSSEC_MAY_KRB5;
@@ -1014,7 +1015,7 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(value, "none", 4) == 0) {
vol->nullauth = 1;
} else {
- cERROR(1, ("bad security option: %s", value));
+ cERROR(1, "bad security option: %s", value);
return 1;
}
} else if ((strnicmp(data, "unc", 3) == 0)
@@ -1053,7 +1054,7 @@ cifs_parse_mount_options(char *options, const char *devname,
a domain name and need special handling? */
if (strnlen(value, 256) < 256) {
vol->domainname = value;
- cFYI(1, ("Domain name set"));
+ cFYI(1, "Domain name set");
} else {
printk(KERN_WARNING "CIFS: domain name too "
"long\n");
@@ -1076,7 +1077,7 @@ cifs_parse_mount_options(char *options, const char *devname,
strcpy(vol->prepath+1, value);
} else
strcpy(vol->prepath, value);
- cFYI(1, ("prefix path %s", vol->prepath));
+ cFYI(1, "prefix path %s", vol->prepath);
} else {
printk(KERN_WARNING "CIFS: prefix too long\n");
return 1;
@@ -1092,7 +1093,7 @@ cifs_parse_mount_options(char *options, const char *devname,
vol->iocharset = value;
/* if iocharset not set then load_nls_default
is used by caller */
- cFYI(1, ("iocharset set to %s", value));
+ cFYI(1, "iocharset set to %s", value);
} else {
printk(KERN_WARNING "CIFS: iocharset name "
"too long.\n");
@@ -1144,14 +1145,14 @@ cifs_parse_mount_options(char *options, const char *devname,
}
} else if (strnicmp(data, "sockopt", 5) == 0) {
if (!value || !*value) {
- cERROR(1, ("no socket option specified"));
+ cERROR(1, "no socket option specified");
continue;
} else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
vol->sockopt_tcp_nodelay = 1;
}
} else if (strnicmp(data, "netbiosname", 4) == 0) {
if (!value || !*value || (*value == ' ')) {
- cFYI(1, ("invalid (empty) netbiosname"));
+ cFYI(1, "invalid (empty) netbiosname");
} else {
memset(vol->source_rfc1001_name, 0x20, 15);
for (i = 0; i < 15; i++) {
@@ -1175,7 +1176,7 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(data, "servern", 7) == 0) {
/* servernetbiosname specified override *SMBSERVER */
if (!value || !*value || (*value == ' ')) {
- cFYI(1, ("empty server netbiosname specified"));
+ cFYI(1, "empty server netbiosname specified");
} else {
/* last byte, type, is 0x20 for server type */
memset(vol->target_rfc1001_name, 0x20, 16);
@@ -1434,7 +1435,7 @@ cifs_find_tcp_session(struct sockaddr_storage *addr, unsigned short int port)
++server->srv_count;
write_unlock(&cifs_tcp_ses_lock);
- cFYI(1, ("Existing tcp session with server found"));
+ cFYI(1, "Existing tcp session with server found");
return server;
}
write_unlock(&cifs_tcp_ses_lock);
@@ -1475,7 +1476,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
memset(&addr, 0, sizeof(struct sockaddr_storage));
- cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip));
+ cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip);
if (volume_info->UNCip && volume_info->UNC) {
rc = cifs_convert_address(volume_info->UNCip, &addr);
@@ -1487,13 +1488,12 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
} else if (volume_info->UNCip) {
/* BB using ip addr as tcp_ses name to connect to the
DFS root below */
- cERROR(1, ("Connecting to DFS root not implemented yet"));
+ cERROR(1, "Connecting to DFS root not implemented yet");
rc = -EINVAL;
goto out_err;
} else /* which tcp_sess DFS root would we connect to */ {
- cERROR(1,
- ("CIFS mount error: No UNC path (e.g. -o "
- "unc=//192.168.1.100/public) specified"));
+ cERROR(1, "CIFS mount error: No UNC path (e.g. -o "
+ "unc=//192.168.1.100/public) specified");
rc = -EINVAL;
goto out_err;
}
@@ -1540,7 +1540,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
++tcp_ses->srv_count;
if (addr.ss_family == AF_INET6) {
- cFYI(1, ("attempting ipv6 connect"));
+ cFYI(1, "attempting ipv6 connect");
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
sin_server6->sin6_port = htons(volume_info->port);
@@ -1554,7 +1554,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
rc = ipv4_connect(tcp_ses);
}
if (rc < 0) {
- cERROR(1, ("Error connecting to socket. Aborting operation"));
+ cERROR(1, "Error connecting to socket. Aborting operation");
goto out_err;
}
@@ -1567,7 +1567,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses, "cifsd");
if (IS_ERR(tcp_ses->tsk)) {
rc = PTR_ERR(tcp_ses->tsk);
- cERROR(1, ("error %d create cifsd thread", rc));
+ cERROR(1, "error %d create cifsd thread", rc);
module_put(THIS_MODULE);
goto out_err;
}
@@ -1616,6 +1616,7 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
int xid;
struct TCP_Server_Info *server = ses->server;
+ cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count);
write_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
write_unlock(&cifs_tcp_ses_lock);
@@ -1634,6 +1635,102 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
cifs_put_tcp_session(server);
}
+static struct cifsSesInfo *
+cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
+{
+ int rc = -ENOMEM, xid;
+ struct cifsSesInfo *ses;
+
+ xid = GetXid();
+
+ ses = cifs_find_smb_ses(server, volume_info->username);
+ if (ses) {
+ cFYI(1, "Existing smb sess found (status=%d)", ses->status);
+
+ /* existing SMB ses has a server reference already */
+ cifs_put_tcp_session(server);
+
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ /* problem -- put our ses reference */
+ cifs_put_smb_ses(ses);
+ FreeXid(xid);
+ return ERR_PTR(rc);
+ }
+ if (ses->need_reconnect) {
+ cFYI(1, "Session needs reconnect");
+ rc = cifs_setup_session(xid, ses,
+ volume_info->local_nls);
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ /* problem -- put our reference */
+ cifs_put_smb_ses(ses);
+ FreeXid(xid);
+ return ERR_PTR(rc);
+ }
+ }
+ mutex_unlock(&ses->session_mutex);
+ FreeXid(xid);
+ return ses;
+ }
+
+ cFYI(1, "Existing smb sess not found");
+ ses = sesInfoAlloc();
+ if (ses == NULL)
+ goto get_ses_fail;
+
+ /* new SMB session uses our server ref */
+ ses->server = server;
+ if (server->addr.sockAddr6.sin6_family == AF_INET6)
+ sprintf(ses->serverName, "%pI6",
+ &server->addr.sockAddr6.sin6_addr);
+ else
+ sprintf(ses->serverName, "%pI4",
+ &server->addr.sockAddr.sin_addr.s_addr);
+
+ if (volume_info->username)
+ strncpy(ses->userName, volume_info->username,
+ MAX_USERNAME_SIZE);
+
+ /* volume_info->password freed at unmount */
+ if (volume_info->password) {
+ ses->password = kstrdup(volume_info->password, GFP_KERNEL);
+ if (!ses->password)
+ goto get_ses_fail;
+ }
+ if (volume_info->domainname) {
+ int len = strlen(volume_info->domainname);
+ ses->domainName = kmalloc(len + 1, GFP_KERNEL);
+ if (ses->domainName)
+ strcpy(ses->domainName, volume_info->domainname);
+ }
+ ses->linux_uid = volume_info->linux_uid;
+ ses->overrideSecFlg = volume_info->secFlg;
+
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (!rc)
+ rc = cifs_setup_session(xid, ses, volume_info->local_nls);
+ mutex_unlock(&ses->session_mutex);
+ if (rc)
+ goto get_ses_fail;
+
+ /* success, put it on the list */
+ write_lock(&cifs_tcp_ses_lock);
+ list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ write_unlock(&cifs_tcp_ses_lock);
+
+ FreeXid(xid);
+ return ses;
+
+get_ses_fail:
+ sesInfoFree(ses);
+ FreeXid(xid);
+ return ERR_PTR(rc);
+}
+
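
cifs_get_smb_ses() reports failure through the returned pointer itself, using the standard <linux/err.h> encoding instead of a separate rc out-parameter; that keeps the lookup-or-create logic in one place. The calling pattern, as the cifs_mount() hunk later in this diff uses it:

	pSesInfo = cifs_get_smb_ses(srvTcp, volume_info);
	if (IS_ERR(pSesInfo)) {
		rc = PTR_ERR(pSesInfo);	/* recover the negative errno */
		pSesInfo = NULL;
		goto mount_fail_check;
	}
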
static struct cifsTconInfo *
cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
{
@@ -1662,6 +1759,7 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
int xid;
struct cifsSesInfo *ses = tcon->ses;
+ cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
write_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
write_unlock(&cifs_tcp_ses_lock);
@@ -1679,6 +1777,80 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
cifs_put_smb_ses(ses);
}
+static struct cifsTconInfo *
+cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
+{
+ int rc, xid;
+ struct cifsTconInfo *tcon;
+
+ tcon = cifs_find_tcon(ses, volume_info->UNC);
+ if (tcon) {
+ cFYI(1, "Found match on UNC path");
+ /* existing tcon already has a reference */
+ cifs_put_smb_ses(ses);
+ if (tcon->seal != volume_info->seal)
+ cERROR(1, "transport encryption setting "
+ "conflicts with existing tid");
+ return tcon;
+ }
+
+ tcon = tconInfoAlloc();
+ if (tcon == NULL) {
+ rc = -ENOMEM;
+ goto out_fail;
+ }
+
+ tcon->ses = ses;
+ if (volume_info->password) {
+ tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
+ if (!tcon->password) {
+ rc = -ENOMEM;
+ goto out_fail;
+ }
+ }
+
+ if (strchr(volume_info->UNC + 3, '\\') == NULL
+ && strchr(volume_info->UNC + 3, '/') == NULL) {
+ cERROR(1, "Missing share name");
+ rc = -ENODEV;
+ goto out_fail;
+ }
+
+ /* BB Do we need to wrap session_mutex around
+ * this TCon call and Unix SetFS as
+ * we do on SessSetup and reconnect? */
+ xid = GetXid();
+ rc = CIFSTCon(xid, ses, volume_info->UNC, tcon, volume_info->local_nls);
+ FreeXid(xid);
+ cFYI(1, "CIFS Tcon rc = %d", rc);
+ if (rc)
+ goto out_fail;
+
+ if (volume_info->nodfs) {
+ tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
+ cFYI(1, "DFS disabled (%d)", tcon->Flags);
+ }
+ tcon->seal = volume_info->seal;
+	/* we can have only one retry value for a connection
+	   to a share, so for resources mounted more than once
+	   to the same server share, the last value passed in
+	   for the retry flag is used */
+ tcon->retry = volume_info->retry;
+ tcon->nocase = volume_info->nocase;
+ tcon->local_lease = volume_info->local_lease;
+
+ write_lock(&cifs_tcp_ses_lock);
+ list_add(&tcon->tcon_list, &ses->tcon_list);
+ write_unlock(&cifs_tcp_ses_lock);
+
+ return tcon;
+
+out_fail:
+ tconInfoFree(tcon);
+ return ERR_PTR(rc);
+}
+
+
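
Both new helpers follow the same reference-counting contract: on the found-existing path they drop the reference the caller held on the lower-level object (cifs_put_tcp_session() above, cifs_put_smb_ses() here), so the caller always comes back holding exactly one reference to the returned object and nothing else. As a sketch (a reading aid, not part of the patch):

	/* caller holds a server ref */
	ses = cifs_get_smb_ses(server, volume_info);
	/* now: caller holds a ses ref; the server ref was either consumed
	   by a new ses (ses->server = server) or dropped because an
	   existing ses already pins the server */

	/* caller holds a ses ref */
	tcon = cifs_get_tcon(ses, volume_info);
	/* now: caller holds a tcon ref; the ses ref was consumed or
	   dropped the same way */
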
int
get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
@@ -1703,8 +1875,7 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
strcpy(temp_unc + 2, pSesInfo->serverName);
strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$");
rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage);
- cFYI(1,
- ("CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid));
+ cFYI(1, "CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid);
kfree(temp_unc);
}
if (rc == 0)
@@ -1777,12 +1948,12 @@ ipv4_connect(struct TCP_Server_Info *server)
rc = sock_create_kern(PF_INET, SOCK_STREAM,
IPPROTO_TCP, &socket);
if (rc < 0) {
- cERROR(1, ("Error %d creating socket", rc));
+ cERROR(1, "Error %d creating socket", rc);
return rc;
}
/* BB other socket options to set KEEPALIVE, NODELAY? */
- cFYI(1, ("Socket created"));
+ cFYI(1, "Socket created");
server->ssocket = socket;
socket->sk->sk_allocation = GFP_NOFS;
cifs_reclassify_socket4(socket);
@@ -1827,7 +1998,7 @@ ipv4_connect(struct TCP_Server_Info *server)
if (!connected) {
if (orig_port)
server->addr.sockAddr.sin_port = orig_port;
- cFYI(1, ("Error %d connecting to server via ipv4", rc));
+ cFYI(1, "Error %d connecting to server via ipv4", rc);
sock_release(socket);
server->ssocket = NULL;
return rc;
@@ -1855,12 +2026,12 @@ ipv4_connect(struct TCP_Server_Info *server)
rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof(val));
if (rc)
- cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ cFYI(1, "set TCP_NODELAY socket option error %d", rc);
}
- cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
+ cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
socket->sk->sk_sndbuf,
- socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
+ socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
/* send RFC1001 sessinit */
if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) {
@@ -1938,13 +2109,13 @@ ipv6_connect(struct TCP_Server_Info *server)
rc = sock_create_kern(PF_INET6, SOCK_STREAM,
IPPROTO_TCP, &socket);
if (rc < 0) {
- cERROR(1, ("Error %d creating ipv6 socket", rc));
+ cERROR(1, "Error %d creating ipv6 socket", rc);
socket = NULL;
return rc;
}
/* BB other socket options to set KEEPALIVE, NODELAY? */
- cFYI(1, ("ipv6 Socket created"));
+ cFYI(1, "ipv6 Socket created");
server->ssocket = socket;
socket->sk->sk_allocation = GFP_NOFS;
cifs_reclassify_socket6(socket);
@@ -1988,7 +2159,7 @@ ipv6_connect(struct TCP_Server_Info *server)
if (!connected) {
if (orig_port)
server->addr.sockAddr6.sin6_port = orig_port;
- cFYI(1, ("Error %d connecting to server via ipv6", rc));
+ cFYI(1, "Error %d connecting to server via ipv6", rc);
sock_release(socket);
server->ssocket = NULL;
return rc;
@@ -2007,7 +2178,7 @@ ipv6_connect(struct TCP_Server_Info *server)
rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof(val));
if (rc)
- cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ cFYI(1, "set TCP_NODELAY socket option error %d", rc);
}
server->ssocket = socket;
@@ -2032,13 +2203,13 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (vol_info && vol_info->no_linux_ext) {
tcon->fsUnixInfo.Capability = 0;
tcon->unix_ext = 0; /* Unix Extensions disabled */
- cFYI(1, ("Linux protocol extensions disabled"));
+ cFYI(1, "Linux protocol extensions disabled");
return;
} else if (vol_info)
tcon->unix_ext = 1; /* Unix Extensions supported */
if (tcon->unix_ext == 0) {
- cFYI(1, ("Unix extensions disabled so not set on reconnect"));
+ cFYI(1, "Unix extensions disabled so not set on reconnect");
return;
}
@@ -2054,12 +2225,11 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
- cERROR(1, ("POSIXPATH support change"));
+ cERROR(1, "POSIXPATH support change");
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
- cERROR(1, ("possible reconnect error"));
- cERROR(1,
- ("server disabled POSIX path support"));
+ cERROR(1, "possible reconnect error");
+ cERROR(1, "server disabled POSIX path support");
}
}
@@ -2067,7 +2237,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (vol_info && vol_info->no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
- cFYI(1, ("negotiated posix acl support"));
+ cFYI(1, "negotiated posix acl support");
if (sb)
sb->s_flags |= MS_POSIXACL;
}
@@ -2075,7 +2245,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (vol_info && vol_info->posix_paths == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
- cFYI(1, ("negotiate posix pathnames"));
+ cFYI(1, "negotiate posix pathnames");
if (sb)
CIFS_SB(sb)->mnt_cifs_flags |=
CIFS_MOUNT_POSIX_PATHS;
@@ -2090,39 +2260,38 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
CIFS_SB(sb)->rsize = 127 * 1024;
- cFYI(DBG2,
- ("larger reads not supported by srv"));
+ cFYI(DBG2, "larger reads not supported by srv");
}
}
- cFYI(1, ("Negotiate caps 0x%x", (int)cap));
+ cFYI(1, "Negotiate caps 0x%x", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2
if (cap & CIFS_UNIX_FCNTL_CAP)
- cFYI(1, ("FCNTL cap"));
+ cFYI(1, "FCNTL cap");
if (cap & CIFS_UNIX_EXTATTR_CAP)
- cFYI(1, ("EXTATTR cap"));
+ cFYI(1, "EXTATTR cap");
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
- cFYI(1, ("POSIX path cap"));
+ cFYI(1, "POSIX path cap");
if (cap & CIFS_UNIX_XATTR_CAP)
- cFYI(1, ("XATTR cap"));
+ cFYI(1, "XATTR cap");
if (cap & CIFS_UNIX_POSIX_ACL_CAP)
- cFYI(1, ("POSIX ACL cap"));
+ cFYI(1, "POSIX ACL cap");
if (cap & CIFS_UNIX_LARGE_READ_CAP)
- cFYI(1, ("very large read cap"));
+ cFYI(1, "very large read cap");
if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
- cFYI(1, ("very large write cap"));
+ cFYI(1, "very large write cap");
#endif /* CIFS_DEBUG2 */
if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
if (vol_info == NULL) {
- cFYI(1, ("resetting capabilities failed"));
+ cFYI(1, "resetting capabilities failed");
} else
- cERROR(1, ("Negotiating Unix capabilities "
+ cERROR(1, "Negotiating Unix capabilities "
"with the server failed. Consider "
"mounting with the Unix Extensions\n"
"disabled, if problems are found, "
"by specifying the nounix mount "
- "option."));
+ "option.");
}
}
@@ -2152,8 +2321,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
if (pvolume_info->rsize > CIFSMaxBufSize) {
- cERROR(1, ("rsize %d too large, using MaxBufSize",
- pvolume_info->rsize));
+ cERROR(1, "rsize %d too large, using MaxBufSize",
+ pvolume_info->rsize);
cifs_sb->rsize = CIFSMaxBufSize;
} else if ((pvolume_info->rsize) &&
(pvolume_info->rsize <= CIFSMaxBufSize))
@@ -2162,8 +2331,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->rsize = CIFSMaxBufSize;
if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) {
- cERROR(1, ("wsize %d too large, using 4096 instead",
- pvolume_info->wsize));
+ cERROR(1, "wsize %d too large, using 4096 instead",
+ pvolume_info->wsize);
cifs_sb->wsize = 4096;
} else if (pvolume_info->wsize)
cifs_sb->wsize = pvolume_info->wsize;
@@ -2181,7 +2350,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
if (cifs_sb->rsize < 2048) {
cifs_sb->rsize = 2048;
/* Windows ME may prefer this */
- cFYI(1, ("readsize set to minimum: 2048"));
+ cFYI(1, "readsize set to minimum: 2048");
}
/* calculate prepath */
cifs_sb->prepath = pvolume_info->prepath;
@@ -2199,8 +2368,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
- cFYI(1, ("file mode: 0x%x dir mode: 0x%x",
- cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode));
+ cFYI(1, "file mode: 0x%x dir mode: 0x%x",
+ cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
if (pvolume_info->noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -2229,13 +2398,13 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
if (pvolume_info->dynperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
if (pvolume_info->direct_io) {
- cFYI(1, ("mounting share using direct i/o"));
+ cFYI(1, "mounting share using direct i/o");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
- cERROR(1, ("mount option dynperm ignored if cifsacl "
- "mount option supported"));
+ cERROR(1, "mount option dynperm ignored if cifsacl "
+ "mount option supported");
}
static int
@@ -2262,7 +2431,7 @@ cleanup_volume_info(struct smb_vol **pvolume_info)
{
struct smb_vol *volume_info;
- if (!pvolume_info && !*pvolume_info)
+ if (!pvolume_info || !*pvolume_info)
return;
volume_info = *pvolume_info;
@@ -2344,11 +2513,11 @@ try_mount_again:
}
if (volume_info->nullauth) {
- cFYI(1, ("null user"));
+ cFYI(1, "null user");
volume_info->username = "";
} else if (volume_info->username) {
/* BB fixme parse for domain name here */
- cFYI(1, ("Username: %s", volume_info->username));
+ cFYI(1, "Username: %s", volume_info->username);
} else {
cifserror("No username specified");
/* In userspace mount helper we can get user name from alternate
@@ -2357,20 +2526,20 @@ try_mount_again:
goto out;
}
-
/* this is needed for ASCII cp to Unicode converts */
if (volume_info->iocharset == NULL) {
- cifs_sb->local_nls = load_nls_default();
- /* load_nls_default can not return null */
+ /* load_nls_default cannot return null */
+ volume_info->local_nls = load_nls_default();
} else {
- cifs_sb->local_nls = load_nls(volume_info->iocharset);
- if (cifs_sb->local_nls == NULL) {
- cERROR(1, ("CIFS mount error: iocharset %s not found",
- volume_info->iocharset));
+ volume_info->local_nls = load_nls(volume_info->iocharset);
+ if (volume_info->local_nls == NULL) {
+ cERROR(1, "CIFS mount error: iocharset %s not found",
+ volume_info->iocharset);
rc = -ELIBACC;
goto out;
}
}
+ cifs_sb->local_nls = volume_info->local_nls;
/* get a reference to a tcp session */
srvTcp = cifs_get_tcp_session(volume_info);
@@ -2379,148 +2548,30 @@ try_mount_again:
goto out;
}
- pSesInfo = cifs_find_smb_ses(srvTcp, volume_info->username);
- if (pSesInfo) {
- cFYI(1, ("Existing smb sess found (status=%d)",
- pSesInfo->status));
- /*
- * The existing SMB session already has a reference to srvTcp,
- * so we can put back the extra one we got before
- */
- cifs_put_tcp_session(srvTcp);
-
- mutex_lock(&pSesInfo->session_mutex);
- if (pSesInfo->need_reconnect) {
- cFYI(1, ("Session needs reconnect"));
- rc = cifs_setup_session(xid, pSesInfo,
- cifs_sb->local_nls);
- }
- mutex_unlock(&pSesInfo->session_mutex);
- } else if (!rc) {
- cFYI(1, ("Existing smb sess not found"));
- pSesInfo = sesInfoAlloc();
- if (pSesInfo == NULL) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
-
- /* new SMB session uses our srvTcp ref */
- pSesInfo->server = srvTcp;
- if (srvTcp->addr.sockAddr6.sin6_family == AF_INET6)
- sprintf(pSesInfo->serverName, "%pI6",
- &srvTcp->addr.sockAddr6.sin6_addr);
- else
- sprintf(pSesInfo->serverName, "%pI4",
- &srvTcp->addr.sockAddr.sin_addr.s_addr);
-
- write_lock(&cifs_tcp_ses_lock);
- list_add(&pSesInfo->smb_ses_list, &srvTcp->smb_ses_list);
- write_unlock(&cifs_tcp_ses_lock);
-
- /* volume_info->password freed at unmount */
- if (volume_info->password) {
- pSesInfo->password = kstrdup(volume_info->password,
- GFP_KERNEL);
- if (!pSesInfo->password) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
- }
- if (volume_info->username)
- strncpy(pSesInfo->userName, volume_info->username,
- MAX_USERNAME_SIZE);
- if (volume_info->domainname) {
- int len = strlen(volume_info->domainname);
- pSesInfo->domainName = kmalloc(len + 1, GFP_KERNEL);
- if (pSesInfo->domainName)
- strcpy(pSesInfo->domainName,
- volume_info->domainname);
- }
- pSesInfo->linux_uid = volume_info->linux_uid;
- pSesInfo->overrideSecFlg = volume_info->secFlg;
- mutex_lock(&pSesInfo->session_mutex);
-
- /* BB FIXME need to pass vol->secFlgs BB */
- rc = cifs_setup_session(xid, pSesInfo,
- cifs_sb->local_nls);
- mutex_unlock(&pSesInfo->session_mutex);
+ /* get a reference to a SMB session */
+ pSesInfo = cifs_get_smb_ses(srvTcp, volume_info);
+ if (IS_ERR(pSesInfo)) {
+ rc = PTR_ERR(pSesInfo);
+ pSesInfo = NULL;
+ goto mount_fail_check;
}
- /* search for existing tcon to this server share */
- if (!rc) {
- setup_cifs_sb(volume_info, cifs_sb);
-
- tcon = cifs_find_tcon(pSesInfo, volume_info->UNC);
- if (tcon) {
- cFYI(1, ("Found match on UNC path"));
- /* existing tcon already has a reference */
- cifs_put_smb_ses(pSesInfo);
- if (tcon->seal != volume_info->seal)
- cERROR(1, ("transport encryption setting "
- "conflicts with existing tid"));
- } else {
- tcon = tconInfoAlloc();
- if (tcon == NULL) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
-
- tcon->ses = pSesInfo;
- if (volume_info->password) {
- tcon->password = kstrdup(volume_info->password,
- GFP_KERNEL);
- if (!tcon->password) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
- }
-
- if ((strchr(volume_info->UNC + 3, '\\') == NULL)
- && (strchr(volume_info->UNC + 3, '/') == NULL)) {
- cERROR(1, ("Missing share name"));
- rc = -ENODEV;
- goto mount_fail_check;
- } else {
- /* BB Do we need to wrap sesSem around
- * this TCon call and Unix SetFS as
- * we do on SessSetup and reconnect? */
- rc = CIFSTCon(xid, pSesInfo, volume_info->UNC,
- tcon, cifs_sb->local_nls);
- cFYI(1, ("CIFS Tcon rc = %d", rc));
- if (volume_info->nodfs) {
- tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
- cFYI(1, ("DFS disabled (%d)",
- tcon->Flags));
- }
- }
- if (rc)
- goto remote_path_check;
- tcon->seal = volume_info->seal;
- write_lock(&cifs_tcp_ses_lock);
- list_add(&tcon->tcon_list, &pSesInfo->tcon_list);
- write_unlock(&cifs_tcp_ses_lock);
- }
-
- /* we can have only one retry value for a connection
- to a share so for resources mounted more than once
- to the same server share the last value passed in
- for the retry flag is used */
- tcon->retry = volume_info->retry;
- tcon->nocase = volume_info->nocase;
- tcon->local_lease = volume_info->local_lease;
- }
- if (pSesInfo) {
- if (pSesInfo->capabilities & CAP_LARGE_FILES)
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- else
- sb->s_maxbytes = MAX_NON_LFS;
- }
+ setup_cifs_sb(volume_info, cifs_sb);
+ if (pSesInfo->capabilities & CAP_LARGE_FILES)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ else
+ sb->s_maxbytes = MAX_NON_LFS;
/* BB FIXME fix time_gran to be larger for LANMAN sessions */
sb->s_time_gran = 100;
- if (rc)
+ /* search for existing tcon to this server share */
+ tcon = cifs_get_tcon(pSesInfo, volume_info);
+ if (IS_ERR(tcon)) {
+ rc = PTR_ERR(tcon);
+ tcon = NULL;
goto remote_path_check;
+ }
cifs_sb->tcon = tcon;
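The hunk above replaces roughly 150 lines of open-coded session and tcon setup with find-or-create helpers. A consolidated sketch of the resulting control flow in cifs_mount(), with the tcp-session error check abbreviated; only the two new helpers are shown returning ERR_PTR values, as in the hunk:

	/* Sketch: the refactored acquisition chain in cifs_mount(). */
	srvTcp = cifs_get_tcp_session(volume_info);	/* tcp session ref */
	/* ... error check as before ... */

	pSesInfo = cifs_get_smb_ses(srvTcp, volume_info); /* find or create */
	if (IS_ERR(pSesInfo)) {
		rc = PTR_ERR(pSesInfo);
		pSesInfo = NULL;
		goto mount_fail_check;
	}

	tcon = cifs_get_tcon(pSesInfo, volume_info);	/* find or create */
	if (IS_ERR(tcon)) {
		rc = PTR_ERR(tcon);
		tcon = NULL;
		goto remote_path_check;
	}
	cifs_sb->tcon = tcon;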
@@ -2544,7 +2595,7 @@ try_mount_again:
if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
cifs_sb->rsize = 1024 * 127;
- cFYI(DBG2, ("no very large read support, rsize now 127K"));
+ cFYI(DBG2, "no very large read support, rsize now 127K");
}
if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
cifs_sb->wsize = min(cifs_sb->wsize,
@@ -2593,7 +2644,7 @@ remote_path_check:
goto mount_fail_check;
}
- cFYI(1, ("Getting referral for: %s", full_path));
+ cFYI(1, "Getting referral for: %s", full_path);
rc = get_dfs_path(xid, pSesInfo , full_path + 1,
cifs_sb->local_nls, &num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -2707,7 +2758,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
by Samba (not sure whether other servers allow
NTLMv2 password here) */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- if ((extended_security & CIFSSEC_MAY_LANMAN) &&
+ if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(tcon->password, ses->server->cryptKey,
ses->server->secMode &
@@ -2778,13 +2829,13 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
if (length == 3) {
if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
(bcc_ptr[2] == 'C')) {
- cFYI(1, ("IPC connection"));
+ cFYI(1, "IPC connection");
tcon->ipc = 1;
}
} else if (length == 2) {
if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
/* the most common case */
- cFYI(1, ("disk share connection"));
+ cFYI(1, "disk share connection");
}
}
bcc_ptr += length + 1;
@@ -2797,7 +2848,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bytes_left, is_unicode,
nls_codepage);
- cFYI(1, ("nativeFileSystem=%s", tcon->nativeFileSystem));
+ cFYI(1, "nativeFileSystem=%s", tcon->nativeFileSystem);
if ((smb_buffer_response->WordCount == 3) ||
(smb_buffer_response->WordCount == 7))
@@ -2805,7 +2856,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
else
tcon->Flags = 0;
- cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
+ cFYI(1, "Tcon flags: 0x%x ", tcon->Flags);
} else if ((rc == 0) && tcon == NULL) {
/* all we need to save for IPC$ connection */
ses->ipc_tid = smb_buffer_response->Tid;
@@ -2833,57 +2884,61 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
return rc;
}
-int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
- struct nls_table *nls_info)
+int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
{
int rc = 0;
- int first_time = 0;
- struct TCP_Server_Info *server = pSesInfo->server;
-
- /* what if server changes its buffer size after dropping the session? */
- if (server->maxBuf == 0) /* no need to send on reconnect */ {
- rc = CIFSSMBNegotiate(xid, pSesInfo);
- if (rc == -EAGAIN) {
- /* retry only once on 1st time connection */
- rc = CIFSSMBNegotiate(xid, pSesInfo);
- if (rc == -EAGAIN)
- rc = -EHOSTDOWN;
- }
- if (rc == 0) {
- spin_lock(&GlobalMid_Lock);
- if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsGood;
- else
- rc = -EHOSTDOWN;
- spin_unlock(&GlobalMid_Lock);
+ struct TCP_Server_Info *server = ses->server;
+
+ /* only send once per connect */
+ if (server->maxBuf != 0)
+ return 0;
+
+ rc = CIFSSMBNegotiate(xid, ses);
+ if (rc == -EAGAIN) {
+ /* retry only once on 1st time connection */
+ rc = CIFSSMBNegotiate(xid, ses);
+ if (rc == -EAGAIN)
+ rc = -EHOSTDOWN;
+ }
+ if (rc == 0) {
+ spin_lock(&GlobalMid_Lock);
+ if (server->tcpStatus != CifsExiting)
+ server->tcpStatus = CifsGood;
+ else
+ rc = -EHOSTDOWN;
+ spin_unlock(&GlobalMid_Lock);
- }
- first_time = 1;
}
- if (rc)
- goto ss_err_exit;
+ return rc;
+}
+
+
+int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+ struct nls_table *nls_info)
+{
+ int rc = 0;
+ struct TCP_Server_Info *server = ses->server;
- pSesInfo->flags = 0;
- pSesInfo->capabilities = server->capabilities;
+ ses->flags = 0;
+ ses->capabilities = server->capabilities;
if (linuxExtEnabled == 0)
- pSesInfo->capabilities &= (~CAP_UNIX);
+ ses->capabilities &= (~CAP_UNIX);
- cFYI(1, ("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
- server->secMode, server->capabilities, server->timeAdj));
+ cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
+ server->secMode, server->capabilities, server->timeAdj);
- rc = CIFS_SessSetup(xid, pSesInfo, first_time, nls_info);
+ rc = CIFS_SessSetup(xid, ses, nls_info);
if (rc) {
- cERROR(1, ("Send error in SessSetup = %d", rc));
+ cERROR(1, "Send error in SessSetup = %d", rc);
} else {
- cFYI(1, ("CIFS Session Established successfully"));
+ cFYI(1, "CIFS Session Established successfully");
spin_lock(&GlobalMid_Lock);
- pSesInfo->status = CifsGood;
- pSesInfo->need_reconnect = false;
+ ses->status = CifsGood;
+ ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
}
-ss_err_exit:
return rc;
}
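This completes the split of the old cifs_setup_session(): cifs_negotiate_protocol() sends NEGOTIATE at most once per TCP connection (server->maxBuf == 0 marks a fresh socket), and the slimmer cifs_setup_session() performs SESSION_SETUP. A minimal sketch of driving the pair in order; example_establish_session() is a hypothetical wrapper, not part of this patch:

static int example_establish_session(unsigned int xid,
				     struct cifsSesInfo *ses,
				     struct nls_table *nls)
{
	int rc;

	/* returns 0 immediately on reconnect (maxBuf already set) */
	rc = cifs_negotiate_protocol(xid, ses);
	if (rc)
		return rc;

	/* SESSION_SETUP; on success marks ses CifsGood and clears
	 * need_reconnect */
	return cifs_setup_session(xid, ses, nls);
}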
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e9f7ecc..391816b 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -73,7 +73,7 @@ cifs_bp_rename_retry:
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
if (temp == NULL) {
- cERROR(1, ("corrupt dentry"));
+ cERROR(1, "corrupt dentry");
return NULL;
}
}
@@ -90,19 +90,18 @@ cifs_bp_rename_retry:
full_path[namelen] = dirsep;
strncpy(full_path + namelen + 1, temp->d_name.name,
temp->d_name.len);
- cFYI(0, ("name: %s", full_path + namelen));
+ cFYI(0, "name: %s", full_path + namelen);
}
temp = temp->d_parent;
if (temp == NULL) {
- cERROR(1, ("corrupt dentry"));
+ cERROR(1, "corrupt dentry");
kfree(full_path);
return NULL;
}
}
if (namelen != pplen + dfsplen) {
- cERROR(1,
- ("did not end path lookup where expected namelen is %d",
- namelen));
+ cERROR(1, "did not end path lookup where expected namelen is %d",
+ namelen);
/* presumably this is only possible if racing with a rename
of one of the parent directories (we can not lock the dentries
above us to prevent this, but retrying should be harmless) */
@@ -130,6 +129,12 @@ cifs_bp_rename_retry:
return full_path;
}
+/*
+ * When called with a NULL struct file pointer there is no way to update
+ * file->private_data directly, but putting the new handle on openFileList
+ * lets cifs_fill_filedata() find it, and cifs_open() can then set
+ * file->private_data from it.
+ */
struct cifsFileInfo *
cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
struct file *file, struct vfsmount *mnt, unsigned int oflags)
@@ -173,7 +178,7 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock inode %p", newinode));
+ cFYI(1, "Exclusive Oplock inode %p", newinode);
} else if ((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
}
@@ -183,16 +188,17 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
}
int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid)
+ struct vfsmount *mnt, struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid)
{
int rc;
FILE_UNIX_BASIC_INFO *presp_data;
__u32 posix_flags = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_fattr fattr;
- cFYI(1, ("posix open %s", full_path));
+ cFYI(1, "posix open %s", full_path);
presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
if (presp_data == NULL)
@@ -242,7 +248,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
/* get new inode and set it up */
if (*pinode == NULL) {
- *pinode = cifs_iget(mnt->mnt_sb, &fattr);
+ cifs_fill_uniqueid(sb, &fattr);
+ *pinode = cifs_iget(sb, &fattr);
if (!*pinode) {
rc = -ENOMEM;
goto posix_open_ret;
@@ -251,7 +258,18 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
cifs_fattr_to_inode(*pinode, &fattr);
}
- cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
+ /*
+ * cifs_fill_filedata() takes care of setting file->private_data
+ * to the cifsFileInfo pointer.
+ */
+ if (mnt) {
+ struct cifsFileInfo *pfile_info;
+
+ pfile_info = cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt,
+ oflags);
+ if (pfile_info == NULL)
+ rc = -ENOMEM;
+ }
posix_open_ret:
kfree(presp_data);
@@ -315,13 +333,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if (nd && (nd->flags & LOOKUP_OPEN))
oflags = nd->intent.open.flags;
else
- oflags = FMODE_READ;
+ oflags = FMODE_READ | SMB_O_CREAT;
if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
- mode, oflags, &oplock, &fileHandle, xid);
+ rc = cifs_posix_open(full_path, &newinode,
+ nd ? nd->path.mnt : NULL,
+ inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
negotiation. EREMOTE indicates DFS junction, which is not
@@ -358,7 +377,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
else if ((oflags & O_CREAT) == O_CREAT)
disposition = FILE_OPEN_IF;
else
- cFYI(1, ("Create flag not set in create function"));
+ cFYI(1, "Create flag not set in create function");
}
/* BB add processing to set equivalent of mode - e.g. via CreateX with
@@ -394,7 +413,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_create returned 0x%x", rc));
+ cFYI(1, "cifs_create returned 0x%x", rc);
goto cifs_create_out;
}
@@ -457,15 +476,22 @@ cifs_create_set_dentry:
if (rc == 0)
setup_cifs_dentry(tcon, direntry, newinode);
else
- cFYI(1, ("Create worked, get_inode_info failed rc = %d", rc));
+ cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
/* nfsd case - nfs srv does not set nd */
if ((nd == NULL) || (!(nd->flags & LOOKUP_OPEN))) {
/* mknod case - do not leave file open */
CIFSSMBClose(xid, tcon, fileHandle);
} else if (!(posix_create) && (newinode)) {
- cifs_new_fileinfo(newinode, fileHandle, NULL,
- nd->path.mnt, oflags);
+ struct cifsFileInfo *pfile_info;
+ /*
+ * cifs_fill_filedata() takes care of setting
+ * file->private_data to the cifsFileInfo pointer.
+ */
+ pfile_info = cifs_new_fileinfo(newinode, fileHandle, NULL,
+ nd->path.mnt, oflags);
+ if (pfile_info == NULL)
+ rc = -ENOMEM;
}
cifs_create_out:
kfree(buf);
@@ -531,7 +557,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
u16 fileHandle;
FILE_ALL_INFO *buf;
- cFYI(1, ("sfu compat create special file"));
+ cFYI(1, "sfu compat create special file");
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
@@ -616,8 +642,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
xid = GetXid();
- cFYI(1, ("parent inode = 0x%p name is: %s and dentry = 0x%p",
- parent_dir_inode, direntry->d_name.name, direntry));
+ cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
+ parent_dir_inode, direntry->d_name.name, direntry);
/* check whether path exists */
@@ -632,7 +658,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
int i;
for (i = 0; i < direntry->d_name.len; i++)
if (direntry->d_name.name[i] == '\\') {
- cFYI(1, ("Invalid file name"));
+ cFYI(1, "Invalid file name");
FreeXid(xid);
return ERR_PTR(-EINVAL);
}
@@ -657,11 +683,11 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
if (direntry->d_inode != NULL) {
- cFYI(1, ("non-NULL inode in lookup"));
+ cFYI(1, "non-NULL inode in lookup");
} else {
- cFYI(1, ("NULL inode in lookup"));
+ cFYI(1, "NULL inode in lookup");
}
- cFYI(1, ("Full path: %s inode = 0x%p", full_path, direntry->d_inode));
+ cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode);
/* Posix open is only called (at lookup time) for file create now.
* For opens (rather than creates), because we do not know if it
@@ -678,6 +704,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
+ parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.flags, &oplock,
&fileHandle, xid);
@@ -723,7 +750,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
/* if it was once a directory (but how can we tell?) we could do
shrink_dcache_parent(direntry); */
} else if (rc != -EACCES) {
- cERROR(1, ("Unexpected lookup error %d", rc));
+ cERROR(1, "Unexpected lookup error %d", rc);
/* We special case check for Access Denied - since that
is a common return code */
}
@@ -742,8 +769,8 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
if (cifs_revalidate_dentry(direntry))
return 0;
} else {
- cFYI(1, ("neg dentry 0x%p name = %s",
- direntry, direntry->d_name.name));
+ cFYI(1, "neg dentry 0x%p name = %s",
+ direntry, direntry->d_name.name);
if (time_after(jiffies, direntry->d_time + HZ) ||
!lookupCacheEnabled) {
d_drop(direntry);
@@ -758,7 +785,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
{
int rc = 0;
- cFYI(1, ("In cifs d_delete, name = %s", direntry->d_name.name));
+ cFYI(1, "In cifs d_delete, name = %s", direntry->d_name.name);
return rc;
} */
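The dir.c changes above widen cifs_posix_open() to take the superblock explicitly, so a caller with no struct nameidata (the nfsd create path passes nd == NULL) can still reach the cifs_sb, and a NULL vfsmount now simply skips the cifs_new_fileinfo() step. A sketch of the two caller shapes; variable names follow the surrounding code:

	/* open via a lookup intent: the vfsmount comes from the nameidata */
	rc = cifs_posix_open(full_path, &newinode, nd->path.mnt, inode->i_sb,
			     mode, oflags, &oplock, &fileHandle, xid);

	/* nfsd-style create with no nameidata: pass a NULL vfsmount, so no
	 * cifsFileInfo is created and the handle is closed by the caller */
	rc = cifs_posix_open(full_path, &newinode, NULL, inode->i_sb,
			     mode, oflags, &oplock, &fileHandle, xid);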
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 6f8a0e3..4db2c5e 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -106,14 +106,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
/* search for server name delimiter */
len = strlen(unc);
if (len < 3) {
- cFYI(1, ("%s: unc is too short: %s", __func__, unc));
+ cFYI(1, "%s: unc is too short: %s", __func__, unc);
return -EINVAL;
}
len -= 2;
name = memchr(unc+2, '\\', len);
if (!name) {
- cFYI(1, ("%s: probably server name is whole unc: %s",
- __func__, unc));
+ cFYI(1, "%s: probably server name is whole unc: %s",
+ __func__, unc);
} else {
len = (name - unc) - 2/* leading // */;
}
@@ -127,8 +127,8 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
name[len] = 0;
if (is_ip(name)) {
- cFYI(1, ("%s: it is IP, skipping dns upcall: %s",
- __func__, name));
+ cFYI(1, "%s: it is IP, skipping dns upcall: %s",
+ __func__, name);
data = name;
goto skip_upcall;
}
@@ -138,7 +138,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
len = rkey->type_data.x[0];
data = rkey->payload.data;
} else {
- cERROR(1, ("%s: unable to resolve: %s", __func__, name));
+ cERROR(1, "%s: unable to resolve: %s", __func__, name);
goto out;
}
@@ -148,10 +148,10 @@ skip_upcall:
if (*ip_addr) {
memcpy(*ip_addr, data, len + 1);
if (!IS_ERR(rkey))
- cFYI(1, ("%s: resolved: %s to %s", __func__,
+ cFYI(1, "%s: resolved: %s to %s", __func__,
name,
*ip_addr
- ));
+ );
rc = 0;
} else {
rc = -ENOMEM;
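For reference, dns_resolve_server_name_to_ip() extracts the server component of a UNC path, short-circuits when it is already an IP literal, and otherwise upcalls to the dns_resolver key. A usage sketch, assuming (as the surrounding code suggests) that on success the output string is kernel-allocated and owned by the caller:

	char *ip_addr = NULL;
	int rc;

	rc = dns_resolve_server_name_to_ip("\\\\fileserver\\share", &ip_addr);
	if (rc == 0) {
		cFYI(1, "resolved to %s", ip_addr);
		kfree(ip_addr);		/* assumed caller-owned on success */
	}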
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 6177f7c..993f820 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -49,7 +49,7 @@
static struct dentry *cifs_get_parent(struct dentry *dentry)
{
/* BB need to add code here eventually to enable export via NFSD */
- cFYI(1, ("get parent for %p", dentry));
+ cFYI(1, "get parent for %p", dentry);
return ERR_PTR(-EACCES);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9b11a8f..a83541e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3,7 +3,7 @@
*
* vfs operations that deal with files
*
- * Copyright (C) International Business Machines Corp., 2002,2007
+ * Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
@@ -108,8 +108,7 @@ static inline int cifs_get_disposition(unsigned int flags)
/* all arguments to this function must be checked for validity in caller */
static inline int
cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
- struct cifsInodeInfo *pCifsInode,
- struct cifsFileInfo *pCifsFile, __u32 oplock,
+ struct cifsInodeInfo *pCifsInode, __u32 oplock,
u16 netfid)
{
@@ -136,15 +135,15 @@ cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
(file->f_path.dentry->d_inode->i_size ==
(loff_t)le64_to_cpu(buf->EndOfFile))) {
- cFYI(1, ("inode unchanged on server"));
+ cFYI(1, "inode unchanged on server");
} else {
if (file->f_path.dentry->d_inode->i_mapping) {
rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
if (rc != 0)
CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
}
- cFYI(1, ("invalidating remote inode since open detected it "
- "changed"));
+ cFYI(1, "invalidating remote inode since open detected it "
+ "changed");
invalidate_remote_inode(file->f_path.dentry->d_inode);
} */
@@ -152,8 +151,8 @@ psx_client_can_cache:
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode));
+ cFYI(1, "Exclusive Oplock granted on inode %p",
+ file->f_path.dentry->d_inode);
} else if ((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
@@ -190,8 +189,8 @@ cifs_fill_filedata(struct file *file)
if (file->private_data != NULL) {
return pCifsFile;
} else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
- cERROR(1, ("could not find file instance for "
- "new file %p", file));
+ cERROR(1, "could not find file instance for "
+ "new file %p", file);
return NULL;
}
@@ -217,7 +216,7 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
(file->f_path.dentry->d_inode->i_size ==
(loff_t)le64_to_cpu(buf->EndOfFile))) {
- cFYI(1, ("inode unchanged on server"));
+ cFYI(1, "inode unchanged on server");
} else {
if (file->f_path.dentry->d_inode->i_mapping) {
/* BB no need to lock inode until after invalidate
@@ -226,8 +225,8 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
if (rc != 0)
CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
}
- cFYI(1, ("invalidating remote inode since open detected it "
- "changed"));
+ cFYI(1, "invalidating remote inode since open detected it "
+ "changed");
invalidate_remote_inode(file->f_path.dentry->d_inode);
}
@@ -242,8 +241,8 @@ client_can_cache:
if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode));
+ cFYI(1, "Exclusive Oplock granted on inode %p",
+ file->f_path.dentry->d_inode);
} else if ((*oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
@@ -285,8 +284,8 @@ int cifs_open(struct inode *inode, struct file *file)
return rc;
}
- cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
- inode, file->f_flags, full_path));
+ cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
+ inode, file->f_flags, full_path);
if (oplockEnabled)
oplock = REQ_OPLOCK;
@@ -298,27 +297,29 @@ int cifs_open(struct inode *inode, struct file *file)
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+ oflags |= SMB_O_CREAT;
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
- cFYI(1, ("posix open succeeded"));
+ cFYI(1, "posix open succeeded");
/* no special case handling needed here for setting mode
on read only files */
pCifsFile = cifs_fill_filedata(file);
cifs_posix_open_inode_helper(inode, file, pCifsInode,
- pCifsFile, oplock, netfid);
+ oplock, netfid);
goto out;
} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
if (tcon->ses->serverNOS)
- cERROR(1, ("server %s of type %s returned"
+ cERROR(1, "server %s of type %s returned"
" unexpected error on SMB posix open"
", disabling posix open support."
" Check if server update available.",
tcon->ses->serverName,
- tcon->ses->serverNOS));
+ tcon->ses->serverNOS);
tcon->broken_posix_open = true;
} else if ((rc != -EIO) && (rc != -EREMOTE) &&
(rc != -EOPNOTSUPP)) /* path not found or net err */
@@ -386,7 +387,7 @@ int cifs_open(struct inode *inode, struct file *file)
& CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_open returned 0x%x", rc));
+ cFYI(1, "cifs_open returned 0x%x", rc);
goto out;
}
@@ -469,7 +470,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
}
if (file->f_path.dentry == NULL) {
- cERROR(1, ("no valid name if dentry freed"));
+ cERROR(1, "no valid name if dentry freed");
dump_stack();
rc = -EBADF;
goto reopen_error_exit;
@@ -477,7 +478,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
inode = file->f_path.dentry->d_inode;
if (inode == NULL) {
- cERROR(1, ("inode not valid"));
+ cERROR(1, "inode not valid");
dump_stack();
rc = -EBADF;
goto reopen_error_exit;
@@ -499,8 +500,8 @@ reopen_error_exit:
return rc;
}
- cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
- inode, file->f_flags, full_path));
+ cFYI(1, "inode = 0x%p file flags 0x%x for %s",
+ inode, file->f_flags, full_path);
if (oplockEnabled)
oplock = REQ_OPLOCK;
@@ -513,10 +514,11 @@ reopen_error_exit:
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
- cFYI(1, ("posix reopen succeeded"));
+ cFYI(1, "posix reopen succeeded");
goto reopen_success;
}
/* fallthrough to retry open the old way on errors, especially
@@ -537,8 +539,8 @@ reopen_error_exit:
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
mutex_unlock(&pCifsFile->fh_mutex);
- cFYI(1, ("cifs_open returned 0x%x", rc));
- cFYI(1, ("oplock: %d", oplock));
+ cFYI(1, "cifs_open returned 0x%x", rc);
+ cFYI(1, "oplock: %d", oplock);
} else {
reopen_success:
pCifsFile->netfid = netfid;
@@ -570,8 +572,8 @@ reopen_success:
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode));
+ cFYI(1, "Exclusive Oplock granted on inode %p",
+ file->f_path.dentry->d_inode);
} else if ((oplock & 0xF) == OPLOCK_READ) {
pCifsInode->clientCanCacheRead = true;
pCifsInode->clientCanCacheAll = false;
@@ -619,8 +621,7 @@ int cifs_close(struct inode *inode, struct file *file)
the struct would be in each open file,
but this should give enough time to
clear the socket */
- cFYI(DBG2,
- ("close delay, write pending"));
+ cFYI(DBG2, "close delay, write pending");
msleep(timeout);
timeout *= 4;
}
@@ -653,7 +654,7 @@ int cifs_close(struct inode *inode, struct file *file)
read_lock(&GlobalSMBSeslock);
if (list_empty(&(CIFS_I(inode)->openFileList))) {
- cFYI(1, ("closing last open instance for inode %p", inode));
+ cFYI(1, "closing last open instance for inode %p", inode);
/* if the file is not open we do not know if we can cache info
on this inode, much less write behind and read ahead */
CIFS_I(inode)->clientCanCacheRead = false;
@@ -674,7 +675,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
(struct cifsFileInfo *)file->private_data;
char *ptmp;
- cFYI(1, ("Closedir inode = 0x%p", inode));
+ cFYI(1, "Closedir inode = 0x%p", inode);
xid = GetXid();
@@ -685,22 +686,22 @@ int cifs_closedir(struct inode *inode, struct file *file)
pTcon = cifs_sb->tcon;
- cFYI(1, ("Freeing private data in close dir"));
+ cFYI(1, "Freeing private data in close dir");
write_lock(&GlobalSMBSeslock);
if (!pCFileStruct->srch_inf.endOfSearch &&
!pCFileStruct->invalidHandle) {
pCFileStruct->invalidHandle = true;
write_unlock(&GlobalSMBSeslock);
rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
- cFYI(1, ("Closing uncompleted readdir with rc %d",
- rc));
+ cFYI(1, "Closing uncompleted readdir with rc %d",
+ rc);
/* not much we can do if it fails anyway, ignore rc */
rc = 0;
} else
write_unlock(&GlobalSMBSeslock);
ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
if (ptmp) {
- cFYI(1, ("closedir free smb buf in srch struct"));
+ cFYI(1, "closedir free smb buf in srch struct");
pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
if (pCFileStruct->srch_inf.smallBuf)
cifs_small_buf_release(ptmp);
@@ -748,49 +749,49 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
rc = -EACCES;
xid = GetXid();
- cFYI(1, ("Lock parm: 0x%x flockflags: "
+ cFYI(1, "Lock parm: 0x%x flockflags: "
"0x%x flocktype: 0x%x start: %lld end: %lld",
cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
- pfLock->fl_end));
+ pfLock->fl_end);
if (pfLock->fl_flags & FL_POSIX)
- cFYI(1, ("Posix"));
+ cFYI(1, "Posix");
if (pfLock->fl_flags & FL_FLOCK)
- cFYI(1, ("Flock"));
+ cFYI(1, "Flock");
if (pfLock->fl_flags & FL_SLEEP) {
- cFYI(1, ("Blocking lock"));
+ cFYI(1, "Blocking lock");
wait_flag = true;
}
if (pfLock->fl_flags & FL_ACCESS)
- cFYI(1, ("Process suspended by mandatory locking - "
- "not implemented yet"));
+ cFYI(1, "Process suspended by mandatory locking - "
+ "not implemented yet");
if (pfLock->fl_flags & FL_LEASE)
- cFYI(1, ("Lease on file - not implemented yet"));
+ cFYI(1, "Lease on file - not implemented yet");
if (pfLock->fl_flags &
(~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
- cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
+ cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
if (pfLock->fl_type == F_WRLCK) {
- cFYI(1, ("F_WRLCK "));
+ cFYI(1, "F_WRLCK ");
numLock = 1;
} else if (pfLock->fl_type == F_UNLCK) {
- cFYI(1, ("F_UNLCK"));
+ cFYI(1, "F_UNLCK");
numUnlock = 1;
/* Check if unlock includes more than
one lock range */
} else if (pfLock->fl_type == F_RDLCK) {
- cFYI(1, ("F_RDLCK"));
+ cFYI(1, "F_RDLCK");
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else if (pfLock->fl_type == F_EXLCK) {
- cFYI(1, ("F_EXLCK"));
+ cFYI(1, "F_EXLCK");
numLock = 1;
} else if (pfLock->fl_type == F_SHLCK) {
- cFYI(1, ("F_SHLCK"));
+ cFYI(1, "F_SHLCK");
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else
- cFYI(1, ("Unknown type of lock"));
+ cFYI(1, "Unknown type of lock");
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
tcon = cifs_sb->tcon;
@@ -833,8 +834,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
0 /* wait flag */ );
pfLock->fl_type = F_UNLCK;
if (rc != 0)
- cERROR(1, ("Error unlocking previously locked "
- "range %d during test of lock", rc));
+ cERROR(1, "Error unlocking previously locked "
+ "range %d during test of lock", rc);
rc = 0;
} else {
@@ -856,9 +857,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
0 /* wait flag */);
pfLock->fl_type = F_RDLCK;
if (rc != 0)
- cERROR(1, ("Error unlocking "
+ cERROR(1, "Error unlocking "
"previously locked range %d "
- "during test of lock", rc));
+ "during test of lock", rc);
rc = 0;
} else {
pfLock->fl_type = F_WRLCK;
@@ -923,9 +924,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
1, 0, li->type, false);
if (stored_rc)
rc = stored_rc;
-
- list_del(&li->llist);
- kfree(li);
+ else {
+ list_del(&li->llist);
+ kfree(li);
+ }
}
}
mutex_unlock(&fid->lock_mutex);
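The change just above also fixes unlock bookkeeping in cifs_lock(): a lock record is removed from llist and freed only when the server-side unlock succeeded, so a failed unlock stays queued instead of being dropped. In outline (sketch of the corrected loop body):

	if (stored_rc)
		rc = stored_rc;		/* remember failure; keep li queued */
	else {
		list_del(&li->llist);	/* unlock accepted: drop the record */
		kfree(li);
	}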
@@ -988,9 +990,8 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
pTcon = cifs_sb->tcon;
- /* cFYI(1,
- (" write %d bytes to offset %lld of %s", write_size,
- *poffset, file->f_path.dentry->d_name.name)); */
+ /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
+ *poffset, file->f_path.dentry->d_name.name); */
if (file->private_data == NULL)
return -EBADF;
@@ -1091,8 +1092,8 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
pTcon = cifs_sb->tcon;
- cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
- *poffset, file->f_path.dentry->d_name.name));
+ cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
+ *poffset, file->f_path.dentry->d_name.name);
if (file->private_data == NULL)
return -EBADF;
@@ -1233,7 +1234,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
it being zero) during stress testcases so we need to check for it */
if (cifs_inode == NULL) {
- cERROR(1, ("Null inode passed to cifs_writeable_file"));
+ cERROR(1, "Null inode passed to cifs_writeable_file");
dump_stack();
return NULL;
}
@@ -1277,7 +1278,7 @@ refind_writable:
again. Note that it would be bad
to hold up writepages here (rather than
in caller) with continuous retries */
- cFYI(1, ("wp failed on reopen file"));
+ cFYI(1, "wp failed on reopen file");
read_lock(&GlobalSMBSeslock);
/* can not use this handle, no write
pending on this one after all */
@@ -1353,7 +1354,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
else if (bytes_written < 0)
rc = bytes_written;
} else {
- cFYI(1, ("No writeable filehandles for inode"));
+ cFYI(1, "No writeable filehandles for inode");
rc = -EIO;
}
@@ -1525,7 +1526,7 @@ retry:
*/
open_file = find_writable_file(CIFS_I(mapping->host));
if (!open_file) {
- cERROR(1, ("No writable handles for inode"));
+ cERROR(1, "No writable handles for inode");
rc = -EBADF;
} else {
long_op = cifs_write_timeout(cifsi, offset);
@@ -1538,8 +1539,8 @@ retry:
cifs_update_eof(cifsi, offset, bytes_written);
if (rc || bytes_written < bytes_to_write) {
- cERROR(1, ("Write2 ret %d, wrote %d",
- rc, bytes_written));
+ cERROR(1, "Write2 ret %d, wrote %d",
+ rc, bytes_written);
/* BB what if continued retry is
requested via mount flags? */
if (rc == -ENOSPC)
@@ -1600,7 +1601,7 @@ static int cifs_writepage(struct page *page, struct writeback_control *wbc)
/* BB add check for wbc flags */
page_cache_get(page);
if (!PageUptodate(page))
- cFYI(1, ("ppw - page not up to date"));
+ cFYI(1, "ppw - page not up to date");
/*
* Set the "writeback" flag, and clear "dirty" in the radix tree.
@@ -1629,8 +1630,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
int rc;
struct inode *inode = mapping->host;
- cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
- page, pos, copied));
+ cFYI(1, "write_end for page %p from pos %lld with %d bytes",
+ page, pos, copied);
if (PageChecked(page)) {
if (copied == len)
@@ -1686,8 +1687,8 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
xid = GetXid();
- cFYI(1, ("Sync file - name: %s datasync: 0x%x",
- dentry->d_name.name, datasync));
+ cFYI(1, "Sync file - name: %s datasync: 0x%x",
+ dentry->d_name.name, datasync);
rc = filemap_write_and_wait(inode->i_mapping);
if (rc == 0) {
@@ -1711,7 +1712,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
unsigned int rpages = 0;
int rc = 0;
- cFYI(1, ("sync page %p",page));
+ cFYI(1, "sync page %p", page);
mapping = page->mapping;
if (!mapping)
return 0;
@@ -1722,7 +1723,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
+/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
#if 0
if (rc < 0)
@@ -1756,7 +1757,7 @@ int cifs_flush(struct file *file, fl_owner_t id)
CIFS_I(inode)->write_behind_rc = 0;
}
- cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
+ cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
return rc;
}
@@ -1788,7 +1789,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
open_file = (struct cifsFileInfo *)file->private_data;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cFYI(1, ("attempting read on write only file instance"));
+ cFYI(1, "attempting read on write only file instance");
for (total_read = 0, current_offset = read_data;
read_size > total_read;
@@ -1869,7 +1870,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
open_file = (struct cifsFileInfo *)file->private_data;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cFYI(1, ("attempting read on write only file instance"));
+ cFYI(1, "attempting read on write only file instance");
for (total_read = 0, current_offset = read_data;
read_size > total_read;
@@ -1920,7 +1921,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
xid = GetXid();
rc = cifs_revalidate_file(file);
if (rc) {
- cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
+ cFYI(1, "Validation prior to mmap failed, error=%d", rc);
FreeXid(xid);
return rc;
}
@@ -1931,8 +1932,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
static void cifs_copy_cache_pages(struct address_space *mapping,
- struct list_head *pages, int bytes_read, char *data,
- struct pagevec *plru_pvec)
+ struct list_head *pages, int bytes_read, char *data)
{
struct page *page;
char *target;
@@ -1944,10 +1944,10 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
- if (add_to_page_cache(page, mapping, page->index,
+ if (add_to_page_cache_lru(page, mapping, page->index,
GFP_KERNEL)) {
page_cache_release(page);
- cFYI(1, ("Add page cache failed"));
+ cFYI(1, "Add page cache failed");
data += PAGE_CACHE_SIZE;
bytes_read -= PAGE_CACHE_SIZE;
continue;
@@ -1970,8 +1970,6 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
- if (!pagevec_add(plru_pvec, page))
- __pagevec_lru_add_file(plru_pvec);
data += PAGE_CACHE_SIZE;
}
return;
@@ -1990,7 +1988,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
unsigned int read_size, i;
char *smb_read_data = NULL;
struct smb_com_read_rsp *pSMBr;
- struct pagevec lru_pvec;
struct cifsFileInfo *open_file;
int buf_type = CIFS_NO_BUFFER;
@@ -2004,8 +2001,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
pTcon = cifs_sb->tcon;
- pagevec_init(&lru_pvec, 0);
- cFYI(DBG2, ("rpages: num pages %d", num_pages));
+ cFYI(DBG2, "rpages: num pages %d", num_pages);
for (i = 0; i < num_pages; ) {
unsigned contig_pages;
struct page *tmp_page;
@@ -2038,8 +2034,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* Read size needs to be in multiples of one page */
read_size = min_t(const unsigned int, read_size,
cifs_sb->rsize & PAGE_CACHE_MASK);
- cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d",
- read_size, contig_pages));
+ cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
+ read_size, contig_pages);
rc = -EAGAIN;
while (rc == -EAGAIN) {
if ((open_file->invalidHandle) &&
@@ -2066,14 +2062,14 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
}
if ((rc < 0) || (smb_read_data == NULL)) {
- cFYI(1, ("Read error in readpages: %d", rc));
+ cFYI(1, "Read error in readpages: %d", rc);
break;
} else if (bytes_read > 0) {
task_io_account_read(bytes_read);
pSMBr = (struct smb_com_read_rsp *)smb_read_data;
cifs_copy_cache_pages(mapping, page_list, bytes_read,
smb_read_data + 4 /* RFC1001 hdr */ +
- le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
+ le16_to_cpu(pSMBr->DataOffset));
i += bytes_read >> PAGE_CACHE_SHIFT;
cifs_stats_bytes_read(pTcon, bytes_read);
@@ -2089,9 +2085,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* break; */
}
} else {
- cFYI(1, ("No bytes read (%d) at offset %lld . "
- "Cleaning remaining pages from readahead list",
- bytes_read, offset));
+ cFYI(1, "No bytes read (%d) at offset %lld . "
+ "Cleaning remaining pages from readahead list",
+ bytes_read, offset);
/* BB turn off caching and do new lookup on
file size at server? */
break;
@@ -2106,8 +2102,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
bytes_read = 0;
}
- pagevec_lru_add_file(&lru_pvec);
-
/* need to free smb_read_data buf before exit */
if (smb_read_data) {
if (buf_type == CIFS_SMALL_BUFFER)
@@ -2136,7 +2130,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
if (rc < 0)
goto io_error;
else
- cFYI(1, ("Bytes read %d", rc));
+ cFYI(1, "Bytes read %d", rc);
file->f_path.dentry->d_inode->i_atime =
current_fs_time(file->f_path.dentry->d_inode->i_sb);
@@ -2168,8 +2162,8 @@ static int cifs_readpage(struct file *file, struct page *page)
return rc;
}
- cFYI(1, ("readpage %p at offset %d 0x%x\n",
- page, (int)offset, (int)offset));
+ cFYI(1, "readpage %p at offset %d 0x%x\n",
+ page, (int)offset, (int)offset);
rc = cifs_readpage_worker(file, page, &offset);
@@ -2239,7 +2233,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
int rc = 0;
- cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));
+ cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
@@ -2311,12 +2305,10 @@ cifs_oplock_break(struct slow_work *work)
int rc, waitrc = 0;
if (inode && S_ISREG(inode->i_mode)) {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- if (cinode->clientCanCacheAll == 0)
+ if (cinode->clientCanCacheRead)
break_lease(inode, O_RDONLY);
- else if (cinode->clientCanCacheRead == 0)
+ else
break_lease(inode, O_WRONLY);
-#endif
rc = filemap_fdatawrite(inode->i_mapping);
if (cinode->clientCanCacheRead == 0) {
waitrc = filemap_fdatawait(inode->i_mapping);
@@ -2326,7 +2318,7 @@ cifs_oplock_break(struct slow_work *work)
rc = waitrc;
if (rc)
cinode->write_behind_rc = rc;
- cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
+ cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
}
/*
@@ -2338,7 +2330,7 @@ cifs_oplock_break(struct slow_work *work)
if (!cfile->closePend && !cfile->oplock_break_cancelled) {
rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
LOCKING_ANDX_OPLOCK_RELEASE, false);
- cFYI(1, ("Oplock release rc = %d", rc));
+ cFYI(1, "Oplock release rc = %d", rc);
}
}
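Beyond the logging conversion, the readpages path above drops its private struct pagevec: add_to_page_cache_lru() inserts each page into the page cache and the LRU in a single call, replacing add_to_page_cache() plus the pagevec_add()/__pagevec_lru_add_file() bookkeeping. The per-page pattern reduces to roughly the following sketch of the loop body:

	page = list_entry(pages->prev, struct page, lru);
	list_del(&page->lru);

	if (add_to_page_cache_lru(page, mapping, page->index, GFP_KERNEL)) {
		/* already cached or no memory: skip this page's data */
		page_cache_release(page);
		data += PAGE_CACHE_SIZE;
		bytes_read -= PAGE_CACHE_SIZE;
		continue;
	}

	/* ... copy bytes into the page ... */
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	data += PAGE_CACHE_SIZE;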
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 35ec117..62b324f 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/inode.c
*
- * Copyright (C) International Business Machines Corp., 2002,2008
+ * Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -86,30 +86,30 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
- cFYI(1, ("%s: revalidating inode %llu", __func__, cifs_i->uniqueid));
+ cFYI(1, "%s: revalidating inode %llu", __func__, cifs_i->uniqueid);
if (inode->i_state & I_NEW) {
- cFYI(1, ("%s: inode %llu is new", __func__, cifs_i->uniqueid));
+ cFYI(1, "%s: inode %llu is new", __func__, cifs_i->uniqueid);
return;
}
/* don't bother with revalidation if we have an oplock */
if (cifs_i->clientCanCacheRead) {
- cFYI(1, ("%s: inode %llu is oplocked", __func__,
- cifs_i->uniqueid));
+ cFYI(1, "%s: inode %llu is oplocked", __func__,
+ cifs_i->uniqueid);
return;
}
/* revalidate if mtime or size have changed */
if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) &&
cifs_i->server_eof == fattr->cf_eof) {
- cFYI(1, ("%s: inode %llu is unchanged", __func__,
- cifs_i->uniqueid));
+ cFYI(1, "%s: inode %llu is unchanged", __func__,
+ cifs_i->uniqueid);
return;
}
- cFYI(1, ("%s: invalidating inode %llu mapping", __func__,
- cifs_i->uniqueid));
+ cFYI(1, "%s: invalidating inode %llu mapping", __func__,
+ cifs_i->uniqueid);
cifs_i->invalid_mapping = true;
}
@@ -137,15 +137,14 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
inode->i_mode = fattr->cf_mode;
cifs_i->cifsAttrs = fattr->cf_cifsattrs;
- cifs_i->uniqueid = fattr->cf_uniqueid;
if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
cifs_i->time = 0;
else
cifs_i->time = jiffies;
- cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode,
- oldtime, cifs_i->time));
+ cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
+ oldtime, cifs_i->time);
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
@@ -170,6 +169,17 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL);
}
+void
+cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ return;
+
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+}
+
/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
void
cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
@@ -227,7 +237,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
/* safest to call it a file if we do not know */
fattr->cf_mode |= S_IFREG;
fattr->cf_dtype = DT_REG;
- cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
+ cFYI(1, "unknown type %d", le32_to_cpu(info->Type));
break;
}
@@ -256,7 +266,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- cFYI(1, ("creating fake fattr for DFS referral"));
+ cFYI(1, "creating fake fattr for DFS referral");
memset(fattr, 0, sizeof(*fattr));
fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
@@ -305,7 +315,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
tcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s", full_path));
+ cFYI(1, "Getting info on %s", full_path);
/* could have done a find first instead but this returns more info */
rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
@@ -323,6 +333,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
if (*pinode == NULL) {
/* get new inode */
+ cifs_fill_uniqueid(sb, &fattr);
*pinode = cifs_iget(sb, &fattr);
if (!*pinode)
rc = -ENOMEM;
@@ -373,7 +384,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
&bytes_read, &pbuf, &buf_type);
if ((rc == 0) && (bytes_read >= 8)) {
if (memcmp("IntxBLK", pbuf, 8) == 0) {
- cFYI(1, ("Block device"));
+ cFYI(1, "Block device");
fattr->cf_mode |= S_IFBLK;
fattr->cf_dtype = DT_BLK;
if (bytes_read == 24) {
@@ -385,7 +396,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
fattr->cf_rdev = MKDEV(mjr, mnr);
}
} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
- cFYI(1, ("Char device"));
+ cFYI(1, "Char device");
fattr->cf_mode |= S_IFCHR;
fattr->cf_dtype = DT_CHR;
if (bytes_read == 24) {
@@ -397,7 +408,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
fattr->cf_rdev = MKDEV(mjr, mnr);
}
} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
- cFYI(1, ("Symlink"));
+ cFYI(1, "Symlink");
fattr->cf_mode |= S_IFLNK;
fattr->cf_dtype = DT_LNK;
} else {
@@ -439,10 +450,10 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
else if (rc > 3) {
mode = le32_to_cpu(*((__le32 *)ea_value));
fattr->cf_mode &= ~SFBITS_MASK;
- cFYI(1, ("special bits 0%o org mode 0%o", mode,
- fattr->cf_mode));
+ cFYI(1, "special bits 0%o org mode 0%o", mode,
+ fattr->cf_mode);
fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
- cFYI(1, ("special mode bits 0%o", mode));
+ cFYI(1, "special mode bits 0%o", mode);
}
return 0;
@@ -548,11 +559,11 @@ int cifs_get_inode_info(struct inode **pinode,
struct cifs_fattr fattr;
pTcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s", full_path));
+ cFYI(1, "Getting info on %s", full_path);
if ((pfindData == NULL) && (*pinode != NULL)) {
if (CIFS_I(*pinode)->clientCanCacheRead) {
- cFYI(1, ("No need to revalidate cached inode sizes"));
+ cFYI(1, "No need to revalidate cached inode sizes");
return rc;
}
}
@@ -618,7 +629,7 @@ int cifs_get_inode_info(struct inode **pinode,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc1 || !fattr.cf_uniqueid) {
- cFYI(1, ("GetSrvInodeNum rc %d", rc1));
+ cFYI(1, "GetSrvInodeNum rc %d", rc1);
fattr.cf_uniqueid = iunique(sb, ROOT_I);
cifs_autodisable_serverino(cifs_sb);
}
@@ -634,13 +645,13 @@ int cifs_get_inode_info(struct inode **pinode,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
if (tmprc)
- cFYI(1, ("cifs_sfu_type failed: %d", tmprc));
+ cFYI(1, "cifs_sfu_type failed: %d", tmprc);
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
/* fill in 0777 bits from ACL */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- cFYI(1, ("Getting mode bits from ACL"));
+ cFYI(1, "Getting mode bits from ACL");
cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid);
}
#endif
@@ -715,6 +726,16 @@ cifs_find_inode(struct inode *inode, void *opaque)
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
return 0;
+ /*
+ * uh oh -- it's a directory. We can't use it since hardlinked dirs are
+ * verboten. Disable serverino and return it as if it were found, the
+ * caller can discard it, generate a uniqueid and retry the find
+ */
+ if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
+ fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
+ cifs_autodisable_serverino(CIFS_SB(inode->i_sb));
+ }
+
return 1;
}
@@ -734,15 +755,22 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
unsigned long hash;
struct inode *inode;
- cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
+retry_iget5_locked:
+ cFYI(1, "looking for uniqueid=%llu", fattr->cf_uniqueid);
/* hash down to 32-bits on 32-bit arch */
hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
-
- /* we have fattrs in hand, update the inode */
if (inode) {
+ /* was there a problematic inode number collision? */
+ if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
+ iput(inode);
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
+ goto retry_iget5_locked;
+ }
+
cifs_fattr_to_inode(inode, fattr);
if (sb->s_flags & MS_NOATIME)
inode->i_flags |= S_NOATIME | S_NOCMTIME;
@@ -780,7 +808,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
return ERR_PTR(-ENOMEM);
if (rc && cifs_sb->tcon->ipc) {
- cFYI(1, ("ipc connection - fake read inode"));
+ cFYI(1, "ipc connection - fake read inode");
inode->i_mode |= S_IFDIR;
inode->i_nlink = 2;
inode->i_op = &cifs_ipc_inode_ops;
@@ -842,7 +870,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
* server times.
*/
if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
- cFYI(1, ("CIFS - CTIME changed"));
+ cFYI(1, "CIFS - CTIME changed");
info_buf.ChangeTime =
cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
} else
@@ -877,8 +905,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
goto out;
}
- cFYI(1, ("calling SetFileInfo since SetPathInfo for "
- "times not supported by this server"));
+ cFYI(1, "calling SetFileInfo since SetPathInfo for "
+ "times not supported by this server");
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
CREATE_NOT_DIR, &netfid, &oplock,
@@ -1036,7 +1064,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
struct iattr *attrs = NULL;
__u32 dosattr = 0, origattr = 0;
- cFYI(1, ("cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry));
+ cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry);
xid = GetXid();
@@ -1055,7 +1083,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("posix del rc %d", rc));
+ cFYI(1, "posix del rc %d", rc);
if ((rc == 0) || (rc == -ENOENT))
goto psx_del_no_retry;
}
@@ -1129,7 +1157,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
struct inode *newinode = NULL;
struct cifs_fattr fattr;
- cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode));
+ cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode);
xid = GetXid();
@@ -1164,7 +1192,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
kfree(pInfo);
goto mkdir_retry_old;
} else if (rc) {
- cFYI(1, ("posix mkdir returned 0x%x", rc));
+ cFYI(1, "posix mkdir returned 0x%x", rc);
d_drop(direntry);
} else {
if (pInfo->Type == cpu_to_le32(-1)) {
@@ -1181,6 +1209,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
direntry->d_op = &cifs_dentry_ops;
cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
+ cifs_fill_uniqueid(inode->i_sb, &fattr);
newinode = cifs_iget(inode->i_sb, &fattr);
if (!newinode) {
kfree(pInfo);
@@ -1190,12 +1219,12 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
d_instantiate(direntry, newinode);
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("instantiated dentry %p %s to inode %p",
- direntry, direntry->d_name.name, newinode));
+ cFYI(1, "instantiated dentry %p %s to inode %p",
+ direntry, direntry->d_name.name, newinode);
if (newinode->i_nlink != 2)
- cFYI(1, ("unexpected number of links %d",
- newinode->i_nlink));
+ cFYI(1, "unexpected number of links %d",
+ newinode->i_nlink);
#endif
}
kfree(pInfo);
@@ -1206,7 +1235,7 @@ mkdir_retry_old:
rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cFYI(1, ("cifs_mkdir returned 0x%x", rc));
+ cFYI(1, "cifs_mkdir returned 0x%x", rc);
d_drop(direntry);
} else {
mkdir_get_info:
@@ -1309,7 +1338,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
- cFYI(1, ("cifs_rmdir, inode = 0x%p", inode));
+ cFYI(1, "cifs_rmdir, inode = 0x%p", inode);
xid = GetXid();
@@ -1511,6 +1540,11 @@ cifs_inode_needs_reval(struct inode *inode)
if (time_after_eq(jiffies, cifs_i->time + HZ))
return true;
+ /* hardlinked files w/ noserverino get "special" treatment */
+ if (!(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
+ S_ISREG(inode->i_mode) && inode->i_nlink != 1)
+ return true;
+
return false;
}
@@ -1577,9 +1611,9 @@ int cifs_revalidate_dentry(struct dentry *dentry)
goto check_inval;
}
- cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
+ cFYI(1, "Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
"jiffies %ld", full_path, inode, inode->i_count.counter,
- dentry, dentry->d_time, jiffies));
+ dentry, dentry->d_time, jiffies);
if (CIFS_SB(sb)->tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
@@ -1673,12 +1707,12 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid,
npid, false);
cifsFileInfo_put(open_file);
- cFYI(1, ("SetFSize for attrs rc = %d", rc));
+ cFYI(1, "SetFSize for attrs rc = %d", rc);
if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
unsigned int bytes_written;
rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size,
&bytes_written, NULL, NULL, 1);
- cFYI(1, ("Wrt seteof rc %d", rc));
+ cFYI(1, "Wrt seteof rc %d", rc);
}
} else
rc = -EINVAL;
@@ -1692,7 +1726,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
false, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
+ cFYI(1, "SetEOF by path (setattrs) rc = %d", rc);
if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
__u16 netfid;
int oplock = 0;
@@ -1709,7 +1743,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
attrs->ia_size,
&bytes_written, NULL,
NULL, 1);
- cFYI(1, ("wrt seteof rc %d", rc));
+ cFYI(1, "wrt seteof rc %d", rc);
CIFSSMBClose(xid, pTcon, netfid);
}
}
@@ -1737,8 +1771,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct cifs_unix_set_info_args *args = NULL;
struct cifsFileInfo *open_file;
- cFYI(1, ("setattr_unix on file %s attrs->ia_valid=0x%x",
- direntry->d_name.name, attrs->ia_valid));
+ cFYI(1, "setattr_unix on file %s attrs->ia_valid=0x%x",
+ direntry->d_name.name, attrs->ia_valid);
xid = GetXid();
@@ -1868,8 +1902,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
xid = GetXid();
- cFYI(1, ("setattr on file %s attrs->iavalid 0x%x",
- direntry->d_name.name, attrs->ia_valid));
+ cFYI(1, "setattr on file %s attrs->iavalid 0x%x",
+ direntry->d_name.name, attrs->ia_valid);
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) {
/* check if we have permission to change attrs */
@@ -1926,7 +1960,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
attrs->ia_valid &= ~ATTR_MODE;
if (attrs->ia_valid & ATTR_MODE) {
- cFYI(1, ("Mode changed to 0%o", attrs->ia_mode));
+ cFYI(1, "Mode changed to 0%o", attrs->ia_mode);
mode = attrs->ia_mode;
}
@@ -2012,7 +2046,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
#if 0
void cifs_delete_inode(struct inode *inode)
{
- cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode));
+ cFYI(1, "In cifs_delete_inode, inode = 0x%p", inode);
/* may have to add back in if and when safe distributed caching of
directories is added e.g. via FindNotify */
}
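Two inode.c changes above work together: cifs_fill_uniqueid() mints a local inode number via iunique() whenever the mount is not using server inode numbers, and cifs_iget() retries with a fresh iunique() value when cifs_find_inode() flags a hard-linked-directory collision (CIFS_FATTR_INO_COLLISION). Callers follow this pattern, sketched from the hunks above:

	cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
	cifs_fill_uniqueid(sb, &fattr);	/* no-op with server inode numbers */
	inode = cifs_iget(sb, &fattr);	/* retries internally on collision */
	if (!inode)
		rc = -ENOMEM;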
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index f946506..505926f 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -47,7 +47,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
xid = GetXid();
- cFYI(1, ("ioctl file %p cmd %u arg %lu", filep, command, arg));
+ cFYI(1, "ioctl file %p cmd %u arg %lu", filep, command, arg);
cifs_sb = CIFS_SB(inode->i_sb);
@@ -64,12 +64,12 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
switch (command) {
case CIFS_IOC_CHECKUMOUNT:
- cFYI(1, ("User unmount attempted"));
+ cFYI(1, "User unmount attempted");
if (cifs_sb->mnt_uid == current_uid())
rc = 0;
else {
rc = -EACCES;
- cFYI(1, ("uids do not match"));
+ cFYI(1, "uids do not match");
}
break;
#ifdef CONFIG_CIFS_POSIX
@@ -97,11 +97,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
/* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
extAttrBits, &ExtAttrMask);*/
}
- cFYI(1, ("set flags not implemented yet"));
+ cFYI(1, "set flags not implemented yet");
break;
#endif /* CONFIG_CIFS_POSIX */
default:
- cFYI(1, ("unsupported ioctl"));
+ cFYI(1, "unsupported ioctl");
break;
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index c1a9d42..473ca80 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -139,7 +139,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
if (!full_path)
goto out;
- cFYI(1, ("Full path: %s inode = 0x%p", full_path, inode));
+ cFYI(1, "Full path: %s inode = 0x%p", full_path, inode);
rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
cifs_sb->local_nls);
@@ -178,8 +178,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
return rc;
}
- cFYI(1, ("Full path: %s", full_path));
- cFYI(1, ("symname is %s", symname));
+ cFYI(1, "Full path: %s", full_path);
+ cFYI(1, "symname is %s", symname);
/* BB what if DFS and this volume is on different share? BB */
if (pTcon->unix_ext)
@@ -198,8 +198,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
inode->i_sb, xid, NULL);
if (rc != 0) {
- cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d",
- rc));
+ cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d",
+ rc);
} else {
if (pTcon->nocase)
direntry->d_op = &cifs_ci_dentry_ops;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index d147499..1394aa3 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -51,7 +51,7 @@ _GetXid(void)
if (GlobalTotalActiveXid > GlobalMaxActiveXid)
GlobalMaxActiveXid = GlobalTotalActiveXid;
if (GlobalTotalActiveXid > 65000)
- cFYI(1, ("warning: more than 65000 requests active"));
+ cFYI(1, "warning: more than 65000 requests active");
xid = GlobalCurrentXid++;
spin_unlock(&GlobalMid_Lock);
return xid;
@@ -88,7 +88,7 @@ void
sesInfoFree(struct cifsSesInfo *buf_to_free)
{
if (buf_to_free == NULL) {
- cFYI(1, ("Null buffer passed to sesInfoFree"));
+ cFYI(1, "Null buffer passed to sesInfoFree");
return;
}
@@ -126,7 +126,7 @@ void
tconInfoFree(struct cifsTconInfo *buf_to_free)
{
if (buf_to_free == NULL) {
- cFYI(1, ("Null buffer passed to tconInfoFree"));
+ cFYI(1, "Null buffer passed to tconInfoFree");
return;
}
atomic_dec(&tconInfoAllocCount);
@@ -166,7 +166,7 @@ void
cifs_buf_release(void *buf_to_free)
{
if (buf_to_free == NULL) {
- /* cFYI(1, ("Null buffer passed to cifs_buf_release"));*/
+ /* cFYI(1, "Null buffer passed to cifs_buf_release");*/
return;
}
mempool_free(buf_to_free, cifs_req_poolp);
@@ -202,7 +202,7 @@ cifs_small_buf_release(void *buf_to_free)
{
if (buf_to_free == NULL) {
- cFYI(1, ("Null buffer passed to cifs_small_buf_release"));
+ cFYI(1, "Null buffer passed to cifs_small_buf_release");
return;
}
mempool_free(buf_to_free, cifs_sm_req_poolp);
@@ -345,19 +345,19 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* with userid/password pairs found on the smb session */
/* for other target tcp/ip addresses BB */
if (current_fsuid() != treeCon->ses->linux_uid) {
- cFYI(1, ("Multiuser mode and UID "
- "did not match tcon uid"));
+ cFYI(1, "Multiuser mode and UID "
+ "did not match tcon uid");
read_lock(&cifs_tcp_ses_lock);
list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
if (ses->linux_uid == current_fsuid()) {
if (ses->server == treeCon->ses->server) {
- cFYI(1, ("found matching uid substitute right smb_uid"));
+ cFYI(1, "found matching uid substitute right smb_uid");
buffer->Uid = ses->Suid;
break;
} else {
/* BB eventually call cifs_setup_session here */
- cFYI(1, ("local UID found but no smb sess with this server exists"));
+ cFYI(1, "local UID found but no smb sess with this server exists");
}
}
}
@@ -394,17 +394,16 @@ checkSMBhdr(struct smb_hdr *smb, __u16 mid)
if (smb->Command == SMB_COM_LOCKING_ANDX)
return 0;
else
- cERROR(1, ("Received Request not response"));
+ cERROR(1, "Received Request not response");
}
} else { /* bad signature or mid */
if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
- cERROR(1,
- ("Bad protocol string signature header %x",
- *(unsigned int *) smb->Protocol));
+ cERROR(1, "Bad protocol string signature header %x",
+ *(unsigned int *) smb->Protocol);
if (mid != smb->Mid)
- cERROR(1, ("Mids do not match"));
+ cERROR(1, "Mids do not match");
}
- cERROR(1, ("bad smb detected. The Mid=%d", smb->Mid));
+ cERROR(1, "bad smb detected. The Mid=%d", smb->Mid);
return 1;
}
@@ -413,7 +412,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
{
__u32 len = smb->smb_buf_length;
__u32 clc_len; /* calculated length */
- cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len));
+ cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);
if (length < 2 + sizeof(struct smb_hdr)) {
if ((length >= sizeof(struct smb_hdr) - 1)
@@ -437,15 +436,15 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
tmp[sizeof(struct smb_hdr)+1] = 0;
return 0;
}
- cERROR(1, ("rcvd invalid byte count (bcc)"));
+ cERROR(1, "rcvd invalid byte count (bcc)");
} else {
- cERROR(1, ("Length less than smb header size"));
+ cERROR(1, "Length less than smb header size");
}
return 1;
}
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
- smb->Mid));
+ cERROR(1, "smb length greater than MaxBufSize, mid=%d",
+ smb->Mid);
return 1;
}
@@ -454,8 +453,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
clc_len = smbCalcSize_LE(smb);
if (4 + len != length) {
- cERROR(1, ("Length read does not match RFC1001 length %d",
- len));
+ cERROR(1, "Length read does not match RFC1001 length %d",
+ len);
return 1;
}
@@ -466,8 +465,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
return 0; /* bcc wrapped */
}
- cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
- clc_len, 4 + len, smb->Mid));
+ cFYI(1, "Calculated size %d vs length %d mismatch for mid %d",
+ clc_len, 4 + len, smb->Mid);
/* Windows XP can return a few bytes too much, presumably
an illegal pad, at the end of byte range lock responses
so we allow for that three byte pad, as long as actual
@@ -482,8 +481,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
if ((4+len > clc_len) && (len <= clc_len + 512))
return 0;
else {
- cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
- len, smb->Mid));
+ cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d",
+ len, smb->Mid);
return 1;
}
}
@@ -501,7 +500,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
struct cifsFileInfo *netfile;
int rc;
- cFYI(1, ("Checking for oplock break or dnotify response"));
+ cFYI(1, "Checking for oplock break or dnotify response");
if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
(pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
struct smb_com_transaction_change_notify_rsp *pSMBr =
@@ -513,15 +512,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
- cFYI(1, ("dnotify on %s Action: 0x%x",
- pnotify->FileName, pnotify->Action));
+ cFYI(1, "dnotify on %s Action: 0x%x",
+ pnotify->FileName, pnotify->Action);
/* cifs_dump_mem("Rcvd notify Data: ",buf,
sizeof(struct smb_hdr)+60); */
return true;
}
if (pSMBr->hdr.Status.CifsError) {
- cFYI(1, ("notify err 0x%d",
- pSMBr->hdr.Status.CifsError));
+ cFYI(1, "notify err 0x%d",
+ pSMBr->hdr.Status.CifsError);
return true;
}
return false;
@@ -535,7 +534,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
large dirty files cached on the client */
if ((NT_STATUS_INVALID_HANDLE) ==
le32_to_cpu(pSMB->hdr.Status.CifsError)) {
- cFYI(1, ("invalid handle on oplock break"));
+ cFYI(1, "invalid handle on oplock break");
return true;
} else if (ERRbadfid ==
le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
@@ -547,8 +546,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
if (pSMB->hdr.WordCount != 8)
return false;
- cFYI(1, ("oplock type 0x%d level 0x%d",
- pSMB->LockType, pSMB->OplockLevel));
+ cFYI(1, "oplock type 0x%d level 0x%d",
+ pSMB->LockType, pSMB->OplockLevel);
if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
return false;
@@ -579,15 +578,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
return true;
}
- cFYI(1, ("file id match, oplock break"));
+ cFYI(1, "file id match, oplock break");
pCifsInode = CIFS_I(netfile->pInode);
pCifsInode->clientCanCacheAll = false;
if (pSMB->OplockLevel == 0)
pCifsInode->clientCanCacheRead = false;
rc = slow_work_enqueue(&netfile->oplock_break);
if (rc) {
- cERROR(1, ("failed to enqueue oplock "
- "break: %d\n", rc));
+ cERROR(1, "failed to enqueue oplock "
+ "break: %d\n", rc);
} else {
netfile->oplock_break_cancelled = false;
}
@@ -597,12 +596,12 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
}
read_unlock(&GlobalSMBSeslock);
read_unlock(&cifs_tcp_ses_lock);
- cFYI(1, ("No matching file for oplock break"));
+ cFYI(1, "No matching file for oplock break");
return true;
}
}
read_unlock(&cifs_tcp_ses_lock);
- cFYI(1, ("Can not process oplock break for non-existent connection"));
+ cFYI(1, "Can not process oplock break for non-existent connection");
return true;
}
@@ -721,11 +720,11 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
- cERROR(1, ("Autodisabling the use of server inode numbers on "
+ cERROR(1, "Autodisabling the use of server inode numbers on "
"%s. This server doesn't seem to support them "
"properly. Hardlinks will not be recognized on this "
"mount. Consider mounting with the \"noserverino\" "
"option to silence this message.",
- cifs_sb->tcon->treeName));
+ cifs_sb->tcon->treeName);
}
}
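
For reference, the length rules that checkSMB() enforces around these reformatted log calls can be modeled in isolation: the RFC1001 length field must agree with the bytes actually read, and may exceed the size calculated from the SMB's own word/byte counts only by a small trailing pad that some servers are known to append. A hedged standalone sketch, with made-up names and the 512-byte bound taken from the code above:

	#include <stdbool.h>
	#include <stdio.h>

	static bool smb_length_ok(unsigned int rfc_len,   /* length field */
				  unsigned int read_len,  /* bytes received */
				  unsigned int calc_len)  /* from wct + bcc */
	{
		if (4 + rfc_len != read_len)
			return false;		/* short or long read */
		if (calc_len == 4 + rfc_len)
			return true;		/* exact match */
		/* tolerate a little server pad, never a short frame */
		return (4 + rfc_len > calc_len) && (rfc_len <= calc_len + 512);
	}

	int main(void)
	{
		printf("%d\n", smb_length_ok(100, 104, 104)); /* exact   -> 1 */
		printf("%d\n", smb_length_ok(100, 104, 101)); /* padded  -> 1 */
		printf("%d\n", smb_length_ok(100, 104, 200)); /* too big -> 0 */
		return 0;
	}
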
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index bd6d689..d35d528 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -149,7 +149,7 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst)
else if (address_family == AF_INET6)
ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
- cFYI(DBG2, ("address conversion returned %d for %s", ret, cp));
+ cFYI(DBG2, "address conversion returned %d for %s", ret, cp);
if (ret > 0)
ret = 1;
return ret;
@@ -870,8 +870,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
}
/* else ERRHRD class errors or junk - return EIO */
- cFYI(1, ("Mapping smb error code %d to POSIX err %d",
- smberrcode, rc));
+ cFYI(1, "Mapping smb error code %d to POSIX err %d",
+ smberrcode, rc);
/* generic corrective action e.g. reconnect SMB session on
* ERRbaduid could be added */
@@ -940,20 +940,20 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
SMB_TIME *st = (SMB_TIME *)&time;
SMB_DATE *sd = (SMB_DATE *)&date;
- cFYI(1, ("date %d time %d", date, time));
+ cFYI(1, "date %d time %d", date, time);
sec = 2 * st->TwoSeconds;
min = st->Minutes;
if ((sec > 59) || (min > 59))
- cERROR(1, ("illegal time min %d sec %d", min, sec));
+ cERROR(1, "illegal time min %d sec %d", min, sec);
sec += (min * 60);
sec += 60 * 60 * st->Hours;
if (st->Hours > 24)
- cERROR(1, ("illegal hours %d", st->Hours));
+ cERROR(1, "illegal hours %d", st->Hours);
days = sd->Day;
month = sd->Month;
if ((days > 31) || (month > 12)) {
- cERROR(1, ("illegal date, month %d day: %d", month, days));
+ cERROR(1, "illegal date, month %d day: %d", month, days);
if (month > 12)
month = 12;
}
@@ -979,7 +979,7 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
ts.tv_sec = sec + offset;
- /* cFYI(1,("sec after cnvrt dos to unix time %d",sec)); */
+ /* cFYI(1, "sec after cnvrt dos to unix time %d",sec); */
ts.tv_nsec = 0;
return ts;
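
cnvrtDosUnixTm() above decodes the legacy two-word DOS date/time format (two-second resolution) before applying the timezone offset. A self-contained sketch of that decode, assuming the usual FAT/DOS bit layout; the kernel uses SMB_TIME/SMB_DATE bitfield structs rather than explicit shifts:

	#include <stdint.h>
	#include <stdio.h>

	struct dos_tm {
		int sec, min, hour, day, month, year;
	};

	static struct dos_tm decode_dos(uint16_t date, uint16_t time)
	{
		struct dos_tm t;

		t.sec   = (time & 0x1f) * 2;	/* bits 0-4: seconds / 2 */
		t.min   = (time >> 5) & 0x3f;	/* bits 5-10: minutes */
		t.hour  = time >> 11;		/* bits 11-15: hours */
		t.day   = date & 0x1f;		/* bits 0-4: day of month */
		t.month = (date >> 5) & 0x0f;	/* bits 5-8: month */
		t.year  = (date >> 9) + 1980;	/* bits 9-15: since 1980 */
		return t;
	}

	int main(void)
	{
		struct dos_tm t = decode_dos(0x5A21, 0x6C2A); /* sample */

		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       t.year, t.month, t.day, t.hour, t.min, t.sec);
		return 0;
	}
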
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 18e0bc1..daf1753 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -47,15 +47,15 @@ static void dump_cifs_file_struct(struct file *file, char *label)
if (file) {
cf = file->private_data;
if (cf == NULL) {
- cFYI(1, ("empty cifs private file data"));
+ cFYI(1, "empty cifs private file data");
return;
}
if (cf->invalidHandle)
- cFYI(1, ("invalid handle"));
+ cFYI(1, "invalid handle");
if (cf->srch_inf.endOfSearch)
- cFYI(1, ("end of search"));
+ cFYI(1, "end of search");
if (cf->srch_inf.emptyDir)
- cFYI(1, ("empty dir"));
+ cFYI(1, "empty dir");
}
}
#else
@@ -76,7 +76,7 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
struct inode *inode;
struct super_block *sb = parent->d_inode->i_sb;
- cFYI(1, ("For %s", name->name));
+ cFYI(1, "For %s", name->name);
if (parent->d_op && parent->d_op->d_hash)
parent->d_op->d_hash(parent, name);
@@ -214,7 +214,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
fid,
cifs_sb->local_nls);
if (CIFSSMBClose(xid, ptcon, fid)) {
- cFYI(1, ("Error closing temporary reparsepoint open)"));
+ cFYI(1, "Error closing temporary reparsepoint open");
}
}
}
@@ -252,7 +252,7 @@ static int initiate_cifs_search(const int xid, struct file *file)
if (full_path == NULL)
return -ENOMEM;
- cFYI(1, ("Full path: %s start at: %lld", full_path, file->f_pos));
+ cFYI(1, "Full path: %s start at: %lld", full_path, file->f_pos);
ffirst_retry:
/* test for Unix extensions */
@@ -297,7 +297,7 @@ static int cifs_unicode_bytelen(char *str)
if (ustr[len] == 0)
return len << 1;
}
- cFYI(1, ("Unicode string longer than PATH_MAX found"));
+ cFYI(1, "Unicode string longer than PATH_MAX found");
return len << 1;
}
@@ -314,19 +314,18 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
pfData->FileNameLength;
} else
new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
- cFYI(1, ("new entry %p old entry %p", new_entry, old_entry));
+ cFYI(1, "new entry %p old entry %p", new_entry, old_entry);
/* validate that new_entry is not past end of SMB */
if (new_entry >= end_of_smb) {
- cERROR(1,
- ("search entry %p began after end of SMB %p old entry %p",
- new_entry, end_of_smb, old_entry));
+ cERROR(1, "search entry %p began after end of SMB %p old entry %p",
+ new_entry, end_of_smb, old_entry);
return NULL;
} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
(new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
|| ((level != SMB_FIND_FILE_INFO_STANDARD) &&
(new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
- cERROR(1, ("search entry %p extends after end of SMB %p",
- new_entry, end_of_smb));
+ cERROR(1, "search entry %p extends after end of SMB %p",
+ new_entry, end_of_smb);
return NULL;
} else
return new_entry;
@@ -380,8 +379,8 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
filename = &pFindData->FileName[0];
len = pFindData->FileNameLength;
} else {
- cFYI(1, ("Unknown findfirst level %d",
- cfile->srch_inf.info_level));
+ cFYI(1, "Unknown findfirst level %d",
+ cfile->srch_inf.info_level);
}
if (filename) {
@@ -481,7 +480,7 @@ static int cifs_save_resume_key(const char *current_entry,
len = (unsigned int)pFindData->FileNameLength;
cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
} else {
- cFYI(1, ("Unknown findfirst level %d", level));
+ cFYI(1, "Unknown findfirst level %d", level);
return -EINVAL;
}
cifsFile->srch_inf.resume_name_len = len;
@@ -525,7 +524,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
is_dir_changed(file)) ||
(index_to_find < first_entry_in_buffer)) {
/* close and restart search */
- cFYI(1, ("search backing up - close and restart search"));
+ cFYI(1, "search backing up - close and restart search");
write_lock(&GlobalSMBSeslock);
if (!cifsFile->srch_inf.endOfSearch &&
!cifsFile->invalidHandle) {
@@ -535,7 +534,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
} else
write_unlock(&GlobalSMBSeslock);
if (cifsFile->srch_inf.ntwrk_buf_start) {
- cFYI(1, ("freeing SMB ff cache buf on search rewind"));
+ cFYI(1, "freeing SMB ff cache buf on search rewind");
if (cifsFile->srch_inf.smallBuf)
cifs_small_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
@@ -546,8 +545,8 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
}
rc = initiate_cifs_search(xid, file);
if (rc) {
- cFYI(1, ("error %d reinitiating a search on rewind",
- rc));
+ cFYI(1, "error %d reinitiating a search on rewind",
+ rc);
return rc;
}
cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
@@ -555,7 +554,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
(rc == 0) && !cifsFile->srch_inf.endOfSearch) {
- cFYI(1, ("calling findnext2"));
+ cFYI(1, "calling findnext2");
rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
@@ -575,7 +574,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
- cifsFile->srch_inf.entries_in_buffer;
pos_in_buf = index_to_find - first_entry_in_buffer;
- cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));
+ cFYI(1, "found entry - pos_in_buf %d", pos_in_buf);
for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
/* go entry by entry figuring out which is first */
@@ -584,19 +583,19 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
}
if ((current_entry == NULL) && (i < pos_in_buf)) {
/* BB fixme - check if we should flag this error */
- cERROR(1, ("reached end of buf searching for pos in buf"
+ cERROR(1, "reached end of buf searching for pos in buf"
" %d index to find %lld rc %d",
- pos_in_buf, index_to_find, rc));
+ pos_in_buf, index_to_find, rc);
}
rc = 0;
*ppCurrentEntry = current_entry;
} else {
- cFYI(1, ("index not in buffer - could not findnext into it"));
+ cFYI(1, "index not in buffer - could not findnext into it");
return 0;
}
if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
- cFYI(1, ("can not return entries pos_in_buf beyond last"));
+ cFYI(1, "can not return entries pos_in_buf beyond last");
*num_to_ret = 0;
} else
*num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf;
@@ -656,12 +655,12 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
/* one byte length, no name conversion */
len = (unsigned int)pFindData->FileNameLength;
} else {
- cFYI(1, ("Unknown findfirst level %d", level));
+ cFYI(1, "Unknown findfirst level %d", level);
return -EINVAL;
}
if (len > max_len) {
- cERROR(1, ("bad search response length %d past smb end", len));
+ cERROR(1, "bad search response length %d past smb end", len);
return -EINVAL;
}
@@ -754,7 +753,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
* case already. Why should we be clobbering other errors from it?
*/
if (rc) {
- cFYI(1, ("filldir rc = %d", rc));
+ cFYI(1, "filldir rc = %d", rc);
rc = -EOVERFLOW;
}
dput(tmp_dentry);
@@ -786,7 +785,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
case 0:
if (filldir(direntry, ".", 1, file->f_pos,
file->f_path.dentry->d_inode->i_ino, DT_DIR) < 0) {
- cERROR(1, ("Filldir for current dir failed"));
+ cERROR(1, "Filldir for current dir failed");
rc = -ENOMEM;
break;
}
@@ -794,7 +793,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
case 1:
if (filldir(direntry, "..", 2, file->f_pos,
file->f_path.dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
- cERROR(1, ("Filldir for parent dir failed"));
+ cERROR(1, "Filldir for parent dir failed");
rc = -ENOMEM;
break;
}
@@ -807,7 +806,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
if (file->private_data == NULL) {
rc = initiate_cifs_search(xid, file);
- cFYI(1, ("initiate cifs search rc %d", rc));
+ cFYI(1, "initiate cifs search rc %d", rc);
if (rc) {
FreeXid(xid);
return rc;
@@ -821,7 +820,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
cifsFile = file->private_data;
if (cifsFile->srch_inf.endOfSearch) {
if (cifsFile->srch_inf.emptyDir) {
- cFYI(1, ("End of search, empty dir"));
+ cFYI(1, "End of search, empty dir");
rc = 0;
break;
}
@@ -833,16 +832,16 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
rc = find_cifs_entry(xid, pTcon, file,
&current_entry, &num_to_fill);
if (rc) {
- cFYI(1, ("fce error %d", rc));
+ cFYI(1, "fce error %d", rc);
goto rddir2_exit;
} else if (current_entry != NULL) {
- cFYI(1, ("entry %lld found", file->f_pos));
+ cFYI(1, "entry %lld found", file->f_pos);
} else {
- cFYI(1, ("could not find entry"));
+ cFYI(1, "could not find entry");
goto rddir2_exit;
}
- cFYI(1, ("loop through %d times filling dir for net buf %p",
- num_to_fill, cifsFile->srch_inf.ntwrk_buf_start));
+ cFYI(1, "loop through %d times filling dir for net buf %p",
+ num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
max_len = smbCalcSize((struct smb_hdr *)
cifsFile->srch_inf.ntwrk_buf_start);
end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
@@ -851,8 +850,8 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
if (current_entry == NULL) {
/* evaluate whether this case is an error */
- cERROR(1, ("past SMB end, num to fill %d i %d",
- num_to_fill, i));
+ cERROR(1, "past SMB end, num to fill %d i %d",
+ num_to_fill, i);
break;
}
/* if buggy server returns . and .. late do
@@ -867,8 +866,8 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
file->f_pos++;
if (file->f_pos ==
cifsFile->srch_inf.index_of_last_entry) {
- cFYI(1, ("last entry in buf at pos %lld %s",
- file->f_pos, tmp_buf));
+ cFYI(1, "last entry in buf at pos %lld %s",
+ file->f_pos, tmp_buf);
cifs_save_resume_key(current_entry, cifsFile);
break;
} else
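
The rewind branch in find_cifs_entry() above fires when the requested directory position is behind the cached FindFirst/FindNext buffer, or behind the last cached entry while the directory has changed; the search handle is then closed and restarted from zero. A small userspace model of just that decision, with illustrative field names:

	#include <stdbool.h>
	#include <stdio.h>

	struct search_state {
		long long index_of_last_entry;	/* one past last cached */
		int entries_in_buffer;		/* entries held in buffer */
		bool dir_changed;		/* directory was modified */
	};

	static bool must_restart(const struct search_state *s, long long want)
	{
		long long first = s->index_of_last_entry - s->entries_in_buffer;

		return (want < first) ||
		       (want < s->index_of_last_entry && s->dir_changed);
	}

	int main(void)
	{
		struct search_state s = { 150, 50, false };

		printf("%d\n", must_restart(&s, 120));	/* in buffer: 0 */
		printf("%d\n", must_restart(&s, 50));	/* behind it: 1 */
		return 0;
	}
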
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7c3fd74..7707389 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -35,9 +35,11 @@
extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
unsigned char *p24);
-/* Checks if this is the first smb session to be reconnected after
- the socket has been reestablished (so we know whether to use vc 0).
- Called while holding the cifs_tcp_ses_lock, so do not block */
+/*
+ * Checks if this is the first smb session to be reconnected after
+ * the socket has been reestablished (so we know whether to use vc 0).
+ * Called while holding the cifs_tcp_ses_lock, so do not block
+ */
static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
{
struct list_head *tmp;
@@ -284,7 +286,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
int len;
char *data = *pbcc_area;
- cFYI(1, ("bleft %d", bleft));
+ cFYI(1, "bleft %d", bleft);
/*
* Windows servers do not always double null terminate their final
@@ -301,7 +303,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
kfree(ses->serverOS);
ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
- cFYI(1, ("serverOS=%s", ses->serverOS));
+ cFYI(1, "serverOS=%s", ses->serverOS);
len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
data += len;
bleft -= len;
@@ -310,7 +312,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
kfree(ses->serverNOS);
ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
- cFYI(1, ("serverNOS=%s", ses->serverNOS));
+ cFYI(1, "serverNOS=%s", ses->serverNOS);
len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
data += len;
bleft -= len;
@@ -319,7 +321,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
kfree(ses->serverDomain);
ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
- cFYI(1, ("serverDomain=%s", ses->serverDomain));
+ cFYI(1, "serverDomain=%s", ses->serverDomain);
return;
}
@@ -332,7 +334,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
int len;
char *bcc_ptr = *pbcc_area;
- cFYI(1, ("decode sessetup ascii. bleft %d", bleft));
+ cFYI(1, "decode sessetup ascii. bleft %d", bleft);
len = strnlen(bcc_ptr, bleft);
if (len >= bleft)
@@ -344,7 +346,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
if (ses->serverOS)
strncpy(ses->serverOS, bcc_ptr, len);
if (strncmp(ses->serverOS, "OS/2", 4) == 0) {
- cFYI(1, ("OS/2 server"));
+ cFYI(1, "OS/2 server");
ses->flags |= CIFS_SES_OS2;
}
@@ -373,7 +375,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
/* BB For newer servers which do not support Unicode,
and thus do return the domain here, we could add parsing
for it later, but it is not very important */
- cFYI(1, ("ascii: bytes left %d", bleft));
+ cFYI(1, "ascii: bytes left %d", bleft);
return rc;
}
@@ -384,16 +386,16 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
- cERROR(1, ("challenge blob len %d too small", blob_len));
+ cERROR(1, "challenge blob len %d too small", blob_len);
return -EINVAL;
}
if (memcmp(pblob->Signature, "NTLMSSP", 8)) {
- cERROR(1, ("blob signature incorrect %s", pblob->Signature));
+ cERROR(1, "blob signature incorrect %s", pblob->Signature);
return -EINVAL;
}
if (pblob->MessageType != NtLmChallenge) {
- cERROR(1, ("Incorrect message type %d", pblob->MessageType));
+ cERROR(1, "Incorrect message type %d", pblob->MessageType);
return -EINVAL;
}
@@ -447,7 +449,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
This function returns the length of the data in the blob */
static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
struct cifsSesInfo *ses,
- const struct nls_table *nls_cp, int first)
+ const struct nls_table *nls_cp, bool first)
{
AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
__u32 flags;
@@ -546,7 +548,7 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
struct cifsSesInfo *ses,
- const struct nls_table *nls, int first_time)
+ const struct nls_table *nls, bool first_time)
{
int bloblen;
@@ -559,8 +561,8 @@ static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
#endif
int
-CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
- const struct nls_table *nls_cp)
+CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+ const struct nls_table *nls_cp)
{
int rc = 0;
int wct;
@@ -577,13 +579,18 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
int bytes_remaining;
struct key *spnego_key = NULL;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
+ bool first_time;
if (ses == NULL)
return -EINVAL;
+ read_lock(&cifs_tcp_ses_lock);
+ first_time = is_first_ses_reconnect(ses);
+ read_unlock(&cifs_tcp_ses_lock);
+
type = ses->server->secType;
- cFYI(1, ("sess setup type %d", type));
+ cFYI(1, "sess setup type %d", type);
ssetup_ntlmssp_authenticate:
if (phase == NtLmChallenge)
phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
@@ -664,7 +671,7 @@ ssetup_ntlmssp_authenticate:
changed to do higher than lanman dialect and
we reconnected would we ever calc signing_key? */
- cFYI(1, ("Negotiating LANMAN setting up strings"));
+ cFYI(1, "Negotiating LANMAN setting up strings");
/* Unicode not allowed for LANMAN dialects */
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
#endif
@@ -744,7 +751,7 @@ ssetup_ntlmssp_authenticate:
unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
} else
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
- } else if (type == Kerberos || type == MSKerberos) {
+ } else if (type == Kerberos) {
#ifdef CONFIG_CIFS_UPCALL
struct cifs_spnego_msg *msg;
spnego_key = cifs_get_spnego_key(ses);
@@ -758,17 +765,17 @@ ssetup_ntlmssp_authenticate:
/* check version field to make sure that cifs.upcall is
sending us a response in an expected form */
if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cERROR(1, ("incorrect version of cifs.upcall (expected"
+ cERROR(1, "incorrect version of cifs.upcall (expected"
" %d but got %d)",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version));
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
rc = -EKEYREJECTED;
goto ssetup_exit;
}
/* bail out if key is too long */
if (msg->sesskey_len >
sizeof(ses->server->mac_signing_key.data.krb5)) {
- cERROR(1, ("Kerberos signing key too long (%u bytes)",
- msg->sesskey_len));
+ cERROR(1, "Kerberos signing key too long (%u bytes)",
+ msg->sesskey_len);
rc = -EOVERFLOW;
goto ssetup_exit;
}
@@ -796,7 +803,7 @@ ssetup_ntlmssp_authenticate:
/* BB: is this right? */
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
#else /* ! CONFIG_CIFS_UPCALL */
- cERROR(1, ("Kerberos negotiated but upcall support disabled!"));
+ cERROR(1, "Kerberos negotiated but upcall support disabled!");
rc = -ENOSYS;
goto ssetup_exit;
#endif /* CONFIG_CIFS_UPCALL */
@@ -804,12 +811,12 @@ ssetup_ntlmssp_authenticate:
#ifdef CONFIG_CIFS_EXPERIMENTAL
if (type == RawNTLMSSP) {
if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
- cERROR(1, ("NTLMSSP requires Unicode support"));
+ cERROR(1, "NTLMSSP requires Unicode support");
rc = -ENOSYS;
goto ssetup_exit;
}
- cFYI(1, ("ntlmssp session setup phase %d", phase));
+ cFYI(1, "ntlmssp session setup phase %d", phase);
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
capabilities |= CAP_EXTENDED_SECURITY;
pSMB->req.Capabilities |= cpu_to_le32(capabilities);
@@ -827,7 +834,7 @@ ssetup_ntlmssp_authenticate:
on the response (challenge) */
smb_buf->Uid = ses->Suid;
} else {
- cERROR(1, ("invalid phase %d", phase));
+ cERROR(1, "invalid phase %d", phase);
rc = -ENOSYS;
goto ssetup_exit;
}
@@ -839,12 +846,12 @@ ssetup_ntlmssp_authenticate:
}
unicode_oslm_strings(&bcc_ptr, nls_cp);
} else {
- cERROR(1, ("secType %d not supported!", type));
+ cERROR(1, "secType %d not supported!", type);
rc = -ENOSYS;
goto ssetup_exit;
}
#else
- cERROR(1, ("secType %d not supported!", type));
+ cERROR(1, "secType %d not supported!", type);
rc = -ENOSYS;
goto ssetup_exit;
#endif
@@ -862,7 +869,7 @@ ssetup_ntlmssp_authenticate:
CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
/* SMB request buf freed in SendReceive2 */
- cFYI(1, ("ssetup rc from sendrecv2 is %d", rc));
+ cFYI(1, "ssetup rc from sendrecv2 is %d", rc);
pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
smb_buf = (struct smb_hdr *)iov[0].iov_base;
@@ -870,7 +877,7 @@ ssetup_ntlmssp_authenticate:
if ((type == RawNTLMSSP) && (smb_buf->Status.CifsError ==
cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))) {
if (phase != NtLmNegotiate) {
- cERROR(1, ("Unexpected more processing error"));
+ cERROR(1, "Unexpected more processing error");
goto ssetup_exit;
}
/* NTLMSSP Negotiate sent now processing challenge (response) */
@@ -882,14 +889,14 @@ ssetup_ntlmssp_authenticate:
if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
rc = -EIO;
- cERROR(1, ("bad word count %d", smb_buf->WordCount));
+ cERROR(1, "bad word count %d", smb_buf->WordCount);
goto ssetup_exit;
}
action = le16_to_cpu(pSMB->resp.Action);
if (action & GUEST_LOGIN)
- cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */
+ cFYI(1, "Guest login"); /* BB mark SesInfo struct? */
ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
- cFYI(1, ("UID = %d ", ses->Suid));
+ cFYI(1, "UID = %d ", ses->Suid);
/* response can have either 3 or 4 word count - Samba sends 3 */
/* and lanman response is 3 */
bytes_remaining = BCC(smb_buf);
@@ -899,7 +906,7 @@ ssetup_ntlmssp_authenticate:
__u16 blob_len;
blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
if (blob_len > bytes_remaining) {
- cERROR(1, ("bad security blob length %d", blob_len));
+ cERROR(1, "bad security blob length %d", blob_len);
rc = -EINVAL;
goto ssetup_exit;
}
@@ -933,7 +940,7 @@ ssetup_exit:
}
kfree(str_area);
if (resp_buf_type == CIFS_SMALL_BUFFER) {
- cFYI(1, ("ssetup freeing small buf %p", iov[0].iov_base));
+ cFYI(1, "ssetup freeing small buf %p", iov[0].iov_base);
cifs_small_buf_release(iov[0].iov_base);
} else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
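
The CIFS_SessSetup() change above is more than log reformatting: the first_time flag is no longer passed in by every caller but derived once inside the function, under the lock that protects the session list. A rough userspace sketch of the shape of that refactor, modeling the kernel's read lock with a pthreads rwlock and stubbing out the session-list walk:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_rwlock_t ses_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int sessions_established;	/* protected by ses_lock */

	/* would walk the server's smb_ses_list in the kernel */
	static bool is_first_reconnect(void)
	{
		return sessions_established == 0;
	}

	static int sess_setup(void)
	{
		bool first_time;

		/* hold the lock only long enough to sample list state */
		pthread_rwlock_rdlock(&ses_lock);
		first_time = is_first_reconnect();
		pthread_rwlock_unlock(&ses_lock);

		printf("negotiating with vc %d\n", first_time ? 0 : 1);
		return 0;
	}

	int main(void)
	{
		sess_setup();
		sessions_established = 1;
		sess_setup();
		return 0;
	}
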
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index ad081fe..82f78c4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -35,7 +35,6 @@
#include "cifs_debug.h"
extern mempool_t *cifs_mid_poolp;
-extern struct kmem_cache *cifs_oplock_cachep;
static struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
@@ -43,7 +42,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
struct mid_q_entry *temp;
if (server == NULL) {
- cERROR(1, ("Null TCP session in AllocMidQEntry"));
+ cERROR(1, "Null TCP session in AllocMidQEntry");
return NULL;
}
@@ -55,7 +54,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
temp->mid = smb_buffer->Mid; /* always LE */
temp->pid = current->pid;
temp->command = smb_buffer->Command;
- cFYI(1, ("For smb_command %d", temp->command));
+ cFYI(1, "For smb_command %d", temp->command);
/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
/* when mid allocated can be before when sent */
temp->when_alloc = jiffies;
@@ -140,7 +139,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
total_len += iov[i].iov_len;
smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
- cFYI(1, ("Sending smb: total_len %d", total_len));
+ cFYI(1, "Sending smb: total_len %d", total_len);
dump_smb(smb_buffer, len);
i = 0;
@@ -168,9 +167,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
reconnect which may clear the network problem.
*/
if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
- cERROR(1,
- ("sends on sock %p stuck for 15 seconds",
- ssocket));
+ cERROR(1, "sends on sock %p stuck for 15 seconds",
+ ssocket);
rc = -EAGAIN;
break;
}
@@ -184,13 +182,13 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
total_len = 0;
break;
} else if (rc > total_len) {
- cERROR(1, ("sent %d requested %d", rc, total_len));
+ cERROR(1, "sent %d requested %d", rc, total_len);
break;
}
if (rc == 0) {
/* should never happen, letting socket clear before
retrying is our only obvious option here */
- cERROR(1, ("tcp sent no data"));
+ cERROR(1, "tcp sent no data");
msleep(500);
continue;
}
@@ -213,8 +211,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
}
if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
- cFYI(1, ("partial send (%d remaining), terminating session",
- total_len));
+ cFYI(1, "partial send (%d remaining), terminating session",
+ total_len);
/* If we have only sent part of an SMB then the next SMB
could be taken as the remainder of this one. We need
to kill the socket so the server throws away the partial
@@ -223,7 +221,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
}
if (rc < 0) {
- cERROR(1, ("Error %d sending data on socket to server", rc));
+ cERROR(1, "Error %d sending data on socket to server", rc);
} else
rc = 0;
@@ -296,7 +294,7 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
}
if (ses->server->tcpStatus == CifsNeedReconnect) {
- cFYI(1, ("tcp session dead - return to caller to retry"));
+ cFYI(1, "tcp session dead - return to caller to retry");
return -EAGAIN;
}
@@ -348,7 +346,7 @@ static int wait_for_response(struct cifsSesInfo *ses,
lrt += time_to_wait;
if (time_after(jiffies, lrt)) {
/* No replies for time_to_wait. */
- cERROR(1, ("server not responding"));
+ cERROR(1, "server not responding");
return -1;
}
} else {
@@ -379,7 +377,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
iov[0].iov_len = in_buf->smb_buf_length + 4;
flags |= CIFS_NO_RESP;
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
- cFYI(DBG2, ("SendRcvNoRsp flags %d rc %d", flags, rc));
+ cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
return rc;
}
@@ -402,7 +400,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if ((ses == NULL) || (ses->server == NULL)) {
cifs_small_buf_release(in_buf);
- cERROR(1, ("Null session"));
+ cERROR(1, "Null session");
return -EIO;
}
@@ -471,7 +469,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
else if (long_op == CIFS_BLOCKING_OP)
timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
else {
- cERROR(1, ("unknown timeout flag %d", long_op));
+ cERROR(1, "unknown timeout flag %d", long_op);
rc = -EIO;
goto out;
}
@@ -490,8 +488,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
spin_lock(&GlobalMid_Lock);
if (midQ->resp_buf == NULL) {
- cERROR(1, ("No response to cmd %d mid %d",
- midQ->command, midQ->mid));
+ cERROR(1, "No response to cmd %d mid %d",
+ midQ->command, midQ->mid);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
@@ -504,7 +502,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if (rc != -EHOSTDOWN) {
if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1, ("marking request for retry"));
+ cFYI(1, "marking request for retry");
} else {
rc = -EIO;
}
@@ -521,8 +519,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, ("Frame too large received. Length: %d Xid: %d",
- receive_len, xid));
+ cERROR(1, "Frame too large received. Length: %d Xid: %d",
+ receive_len, xid);
rc = -EIO;
goto out;
}
@@ -548,7 +546,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
&ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
- cERROR(1, ("Unexpected SMB signature"));
+ cERROR(1, "Unexpected SMB signature");
/* BB FIXME add code to kill session */
}
}
@@ -569,7 +567,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
DeleteMidQEntry */
} else {
rc = -EIO;
- cFYI(1, ("Bad MID state?"));
+ cFYI(1, "Bad MID state?");
}
out:
@@ -591,11 +589,11 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
struct mid_q_entry *midQ;
if (ses == NULL) {
- cERROR(1, ("Null smb session"));
+ cERROR(1, "Null smb session");
return -EIO;
}
if (ses->server == NULL) {
- cERROR(1, ("Null tcp session"));
+ cERROR(1, "Null tcp session");
return -EIO;
}
@@ -607,8 +605,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
use ses->maxReq */
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cERROR(1, ("Illegal length, greater than maximum frame, %d",
- in_buf->smb_buf_length));
+ cERROR(1, "Illegal length, greater than maximum frame, %d",
+ in_buf->smb_buf_length);
return -EIO;
}
@@ -665,7 +663,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
else if (long_op == CIFS_BLOCKING_OP)
timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
else {
- cERROR(1, ("unknown timeout flag %d", long_op));
+ cERROR(1, "unknown timeout flag %d", long_op);
rc = -EIO;
goto out;
}
@@ -681,8 +679,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
spin_lock(&GlobalMid_Lock);
if (midQ->resp_buf == NULL) {
- cERROR(1, ("No response for cmd %d mid %d",
- midQ->command, midQ->mid));
+ cERROR(1, "No response for cmd %d mid %d",
+ midQ->command, midQ->mid);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
@@ -695,7 +693,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (rc != -EHOSTDOWN) {
if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1, ("marking request for retry"));
+ cFYI(1, "marking request for retry");
} else {
rc = -EIO;
}
@@ -712,8 +710,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, ("Frame too large received. Length: %d Xid: %d",
- receive_len, xid));
+ cERROR(1, "Frame too large received. Length: %d Xid: %d",
+ receive_len, xid);
rc = -EIO;
goto out;
}
@@ -736,7 +734,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
&ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
- cERROR(1, ("Unexpected SMB signature"));
+ cERROR(1, "Unexpected SMB signature");
/* BB FIXME add code to kill session */
}
}
@@ -753,7 +751,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
} else {
rc = -EIO;
- cERROR(1, ("Bad MID state?"));
+ cERROR(1, "Bad MID state?");
}
out:
@@ -824,13 +822,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
struct cifsSesInfo *ses;
if (tcon == NULL || tcon->ses == NULL) {
- cERROR(1, ("Null smb session"));
+ cERROR(1, "Null smb session");
return -EIO;
}
ses = tcon->ses;
if (ses->server == NULL) {
- cERROR(1, ("Null tcp session"));
+ cERROR(1, "Null tcp session");
return -EIO;
}
@@ -842,8 +840,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
use ses->maxReq */
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cERROR(1, ("Illegal length, greater than maximum frame, %d",
- in_buf->smb_buf_length));
+ cERROR(1, "Illegal length, greater than maximum frame, %d",
+ in_buf->smb_buf_length);
return -EIO;
}
@@ -933,8 +931,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
} else {
- cERROR(1, ("No response for cmd %d mid %d",
- midQ->command, midQ->mid));
+ cERROR(1, "No response for cmd %d mid %d",
+ midQ->command, midQ->mid);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
@@ -947,7 +945,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if (rc != -EHOSTDOWN) {
if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1, ("marking request for retry"));
+ cFYI(1, "marking request for retry");
} else {
rc = -EIO;
}
@@ -958,8 +956,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
}
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, ("Frame too large received. Length: %d Xid: %d",
- receive_len, xid));
+ cERROR(1, "Frame too large received. Length: %d Xid: %d",
+ receive_len, xid);
rc = -EIO;
goto out;
}
@@ -968,7 +966,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) {
rc = -EIO;
- cERROR(1, ("Bad MID state?"));
+ cERROR(1, "Bad MID state?");
goto out;
}
@@ -986,7 +984,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
&ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
- cERROR(1, ("Unexpected SMB signature"));
+ cERROR(1, "Unexpected SMB signature");
/* BB FIXME add code to kill session */
}
}
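
smb_sendv() above retries short sends and treats roughly fourteen consecutive blocked attempts (about 15 seconds) as a stuck socket; in the kernel a partial send additionally marks the session for reconnect so the server discards the fragment. A simplified, self-contained model of the retry policy only, with a stubbed transport in place of kernel_sendmsg():

	#include <stdio.h>

	/* stub: returns bytes sent, 0 for "would block", <0 on error */
	static int send_some(const char *buf, int len)
	{
		(void)buf;
		return len > 8 ? len / 2 : len;	/* pretend partial sends */
	}

	static int send_all(const char *buf, int total)
	{
		int stalls = 0;

		while (total > 0) {
			int rc = send_some(buf, total);

			if (rc < 0)
				return rc;	/* hard socket error */
			if (rc == 0) {
				/* the kernel sleeps ~1s per stall; 14
				 * stalls is the give-up bound above */
				if (++stalls >= 14)
					return -1;
				continue;
			}
			stalls = 0;	/* progress resets the clock */
			buf += rc;
			total -= rc;
		}
		return 0;
	}

	int main(void)
	{
		char msg[64] = { 0 };

		printf("rc %d\n", send_all(msg, (int)sizeof(msg)));
		return 0;
	}
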
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index f555ce0..a150920 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -70,12 +70,12 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
return rc;
}
if (ea_name == NULL) {
- cFYI(1, ("Null xattr names not supported"));
+ cFYI(1, "Null xattr names not supported");
} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5)
&& (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) {
cFYI(1,
- ("illegal xattr request %s (only user namespace supported)",
- ea_name));
+ "illegal xattr request %s (only user namespace supported)",
+ ea_name);
/* BB what if no namespace prefix? */
/* Should we just pass them to server, except for
system and perhaps security prefixes? */
@@ -131,19 +131,19 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
search server for EAs or streams to
returns as xattrs */
if (value_size > MAX_EA_VALUE_SIZE) {
- cFYI(1, ("size of EA value too large"));
+ cFYI(1, "size of EA value too large");
kfree(full_path);
FreeXid(xid);
return -EOPNOTSUPP;
}
if (ea_name == NULL) {
- cFYI(1, ("Null xattr names not supported"));
+ cFYI(1, "Null xattr names not supported");
} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
- cFYI(1, ("attempt to set cifs inode metadata"));
+ cFYI(1, "attempt to set cifs inode metadata");
ea_name += 5; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
@@ -169,9 +169,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
ACL_TYPE_ACCESS, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("set POSIX ACL rc %d", rc));
+ cFYI(1, "set POSIX ACL rc %d", rc);
#else
- cFYI(1, ("set POSIX ACL not supported"));
+ cFYI(1, "set POSIX ACL not supported");
#endif
} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
@@ -182,13 +182,13 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
ACL_TYPE_DEFAULT, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("set POSIX default ACL rc %d", rc));
+ cFYI(1, "set POSIX default ACL rc %d", rc);
#else
- cFYI(1, ("set default POSIX ACL not supported"));
+ cFYI(1, "set default POSIX ACL not supported");
#endif
} else {
- cFYI(1, ("illegal xattr request %s (only user namespace"
- " supported)", ea_name));
+ cFYI(1, "illegal xattr request %s (only user namespace"
+ " supported)", ea_name);
/* BB what if no namespace prefix? */
/* Should we just pass them to server, except for
system and perhaps security prefixes? */
@@ -235,13 +235,13 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
if (ea_name == NULL) {
- cFYI(1, ("Null xattr names not supported"));
+ cFYI(1, "Null xattr names not supported");
} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
- cFYI(1, ("attempt to query cifs inode metadata"));
+ cFYI(1, "attempt to query cifs inode metadata");
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
ea_name += 5; /* skip past user. prefix */
@@ -287,7 +287,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
}
#endif /* EXPERIMENTAL */
#else
- cFYI(1, ("query POSIX ACL not supported yet"));
+ cFYI(1, "query POSIX ACL not supported yet");
#endif /* CONFIG_CIFS_POSIX */
} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
@@ -299,18 +299,18 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
#else
- cFYI(1, ("query POSIX default ACL not supported yet"));
+ cFYI(1, "query POSIX default ACL not supported yet");
#endif
} else if (strncmp(ea_name,
CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
- cFYI(1, ("Trusted xattr namespace not supported yet"));
+ cFYI(1, "Trusted xattr namespace not supported yet");
} else if (strncmp(ea_name,
CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
- cFYI(1, ("Security xattr namespace not supported yet"));
+ cFYI(1, "Security xattr namespace not supported yet");
} else
cFYI(1,
- ("illegal xattr request %s (only user namespace supported)",
- ea_name));
+ "illegal xattr request %s (only user namespace supported)",
+ ea_name);
/* We could add an additional check for streams ie
if proc/fs/cifs/streamstoxattr is set then
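
The xattr handlers above honor only the user (and os2) namespaces, strip the "user." prefix before sending the EA to the server, and log-and-ignore everything else. A toy dispatch capturing that behavior; the prefix set and return codes here are illustrative, not the kernel's:

	#include <stdio.h>
	#include <string.h>

	static int set_xattr(const char *name)
	{
		if (!name)
			return -1;	/* null names unsupported */
		if (!strncmp(name, "user.", 5)) {
			/* prefix stripped before it goes on the wire */
			printf("send EA to server: %s\n", name + 5);
			return 0;
		}
		if (!strncmp(name, "trusted.", 8) ||
		    !strncmp(name, "security.", 9))
			return -1;	/* namespaces not supported yet */
		return -1;	/* only the user namespace is mapped */
	}

	int main(void)
	{
		set_xattr("user.comment");
		set_xattr("trusted.foo");
		return 0;
	}
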
diff --git a/fs/compat.c b/fs/compat.c
index 4b6ed03..0544873 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1531,8 +1531,6 @@ int compat_do_execve(char * filename,
if (retval < 0)
goto out;
- current->stack_start = current->mm->start_stack;
-
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c32a1b6..641640d 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -102,7 +102,6 @@
#include <linux/nbd.h>
#include <linux/random.h>
#include <linux/filter.h>
-#include <linux/pktcdvd.h>
#include <linux/hiddev.h>
@@ -1126,8 +1125,6 @@ COMPATIBLE_IOCTL(PPGETMODE)
COMPATIBLE_IOCTL(PPGETPHASE)
COMPATIBLE_IOCTL(PPGETFLAGS)
COMPATIBLE_IOCTL(PPSETFLAGS)
-/* pktcdvd */
-COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
/* Big A */
/* sparc only */
/* Big Q for sound/OSS */
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e48b52..0b502f8 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -645,6 +645,7 @@ static void detach_groups(struct config_group *group)
configfs_detach_group(sd->s_element);
child->d_inode->i_flags |= S_DEAD;
+ dont_mount(child);
mutex_unlock(&child->d_inode->i_mutex);
@@ -840,6 +841,7 @@ static int configfs_attach_item(struct config_item *parent_item,
mutex_lock(&dentry->d_inode->i_mutex);
configfs_remove_dir(item);
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
mutex_unlock(&dentry->d_inode->i_mutex);
d_delete(dentry);
}
@@ -882,6 +884,7 @@ static int configfs_attach_group(struct config_item *parent_item,
if (ret) {
configfs_detach_item(item);
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
}
configfs_adjust_dir_dirent_depth_after_populate(sd);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -1725,6 +1728,7 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
mutex_unlock(&configfs_symlink_mutex);
configfs_detach_group(&group->cg_item);
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
mutex_unlock(&dentry->d_inode->i_mutex);
d_delete(dentry);
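
Each configfs hunk above pairs the existing S_DEAD inode flag with a dentry-side flag set by dont_mount(), both raised under the same inode mutex, so that a racing mount on a dying directory fails rather than landing on a dead dentry. A userspace sketch of that invariant, with made-up flag names and a pthread mutex standing in for i_mutex:

	#include <pthread.h>
	#include <stdio.h>

	#define S_DEAD		0x1	/* inode: directory being removed */
	#define D_CANT_MOUNT	0x2	/* dentry: refuse new mounts */

	struct dir {
		pthread_mutex_t lock;
		int flags;
	};

	static void kill_dir(struct dir *d)
	{
		pthread_mutex_lock(&d->lock);
		d->flags |= S_DEAD;		/* no new children */
		d->flags |= D_CANT_MOUNT;	/* dont_mount() equivalent */
		pthread_mutex_unlock(&d->lock);
	}

	int main(void)
	{
		struct dir d = { PTHREAD_MUTEX_INITIALIZER, 0 };

		kill_dir(&d);
		printf("flags 0x%x\n", d.flags);
		return 0;
	}
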
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 17903b4..031dbe3 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -733,10 +733,7 @@ static void lkb_add_ordered(struct list_head *new, struct list_head *head,
if (lkb->lkb_rqmode < mode)
break;
- if (!lkb)
- list_add_tail(new, head);
- else
- __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
+ __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
/* add/remove lkb to rsb's grant/convert/wait queue */
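
The dlm/lock.c hunk above deletes a branch that could never run: a list_for_each_entry()-style walk that falls off the end leaves the cursor pointing at the head's container, so inserting before it is exactly a tail insert. A standalone demonstration with a minimal doubly linked list (illustrative names, not the kernel list API):

	#include <stdio.h>

	struct node { struct node *prev, *next; int mode; };

	static void insert_between(struct node *n, struct node *p,
				   struct node *q)
	{
		n->prev = p; n->next = q;
		p->next = n; q->prev = n;
	}

	static void add_ordered(struct node *head, struct node *new)
	{
		struct node *cur;

		for (cur = head->next; cur != head; cur = cur->next)
			if (cur->mode < new->mode)
				break;
		/* cur == head when we fell off the end: tail insert */
		insert_between(new, cur->prev, cur);
	}

	int main(void)
	{
		struct node head = { &head, &head, 0 };
		struct node a = { 0, 0, 5 }, b = { 0, 0, 3 }, c = { 0, 0, 7 };
		struct node *p;

		add_ordered(&head, &a);
		add_ordered(&head, &b);
		add_ordered(&head, &c);
		for (p = head.next; p != &head; p = p->next)
			printf("%d ", p->mode);
		printf("\n");	/* descending: 7 5 3 */
		return 0;
	}
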
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 8b6e73c..b627285 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -215,6 +215,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
if (!ast_type) {
kref_get(&lkb->lkb_ref);
list_add_tail(&lkb->lkb_astqueue, &proc->asts);
+ lkb->lkb_ast_first = type;
wake_up_interruptible(&proc->wait);
}
if (type == AST_COMP && (ast_type & AST_COMP))
@@ -223,7 +224,6 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
eol = lkb_is_endoflife(lkb, ua->lksb.sb_status, type);
if (eol) {
- lkb->lkb_ast_type &= ~AST_BAST;
lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
}
@@ -706,7 +706,7 @@ static int device_close(struct inode *inode, struct file *file)
}
static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
- int bmode, char __user *buf, size_t count)
+ int mode, char __user *buf, size_t count)
{
#ifdef CONFIG_COMPAT
struct dlm_lock_result32 result32;
@@ -733,7 +733,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
if (type == AST_BAST) {
result.user_astaddr = ua->bastaddr;
result.user_astparam = ua->bastparam;
- result.bast_mode = bmode;
+ result.bast_mode = mode;
} else {
result.user_astaddr = ua->castaddr;
result.user_astparam = ua->castparam;
@@ -801,7 +801,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
struct dlm_user_proc *proc = file->private_data;
struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
- int error, type=0, bmode=0, removed = 0;
+ int error = 0, removed;
+ int ret_type, ret_mode;
+ int bastmode, castmode, do_bast, do_cast;
if (count == sizeof(struct dlm_device_version)) {
error = copy_version_to_user(buf, count);
@@ -820,6 +822,8 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
#endif
return -EINVAL;
+ try_another:
+
/* do we really need this? can a read happen after a close? */
if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
return -EINVAL;
@@ -855,13 +859,55 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);
- if (lkb->lkb_ast_type & AST_COMP) {
- lkb->lkb_ast_type &= ~AST_COMP;
- type = AST_COMP;
- } else if (lkb->lkb_ast_type & AST_BAST) {
- lkb->lkb_ast_type &= ~AST_BAST;
- type = AST_BAST;
- bmode = lkb->lkb_bastmode;
+ removed = 0;
+ ret_type = 0;
+ ret_mode = 0;
+ do_bast = lkb->lkb_ast_type & AST_BAST;
+ do_cast = lkb->lkb_ast_type & AST_COMP;
+ bastmode = lkb->lkb_bastmode;
+ castmode = lkb->lkb_castmode;
+
+ /* when both are queued, figure out which to do first and
+ switch lkb_ast_first so the other goes out in the next read */
+
+ if (do_cast && do_bast) {
+ if (lkb->lkb_ast_first == AST_COMP) {
+ ret_type = AST_COMP;
+ ret_mode = castmode;
+ lkb->lkb_ast_type &= ~AST_COMP;
+ lkb->lkb_ast_first = AST_BAST;
+ } else {
+ ret_type = AST_BAST;
+ ret_mode = bastmode;
+ lkb->lkb_ast_type &= ~AST_BAST;
+ lkb->lkb_ast_first = AST_COMP;
+ }
+ } else {
+ ret_type = lkb->lkb_ast_first;
+ ret_mode = (ret_type == AST_COMP) ? castmode : bastmode;
+ lkb->lkb_ast_type &= ~ret_type;
+ lkb->lkb_ast_first = 0;
+ }
+
+ /* if we're doing a bast but the bast is unnecessary, then
+ switch to do nothing or do a cast if that was needed next */
+
+ if ((ret_type == AST_BAST) &&
+ dlm_modes_compat(bastmode, lkb->lkb_castmode_done)) {
+ ret_type = 0;
+ ret_mode = 0;
+
+ if (do_cast) {
+ ret_type = AST_COMP;
+ ret_mode = castmode;
+ lkb->lkb_ast_type &= ~AST_COMP;
+ lkb->lkb_ast_first = 0;
+ }
+ }
+
+ if (lkb->lkb_ast_first != lkb->lkb_ast_type) {
+ log_print("device_read %x ast_first %x ast_type %x",
+ lkb->lkb_id, lkb->lkb_ast_first, lkb->lkb_ast_type);
}
if (!lkb->lkb_ast_type) {
@@ -870,15 +916,29 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
}
spin_unlock(&proc->asts_spin);
- error = copy_result_to_user(lkb->lkb_ua,
- test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
- type, bmode, buf, count);
+ if (ret_type) {
+ error = copy_result_to_user(lkb->lkb_ua,
+ test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
+ ret_type, ret_mode, buf, count);
+
+ if (ret_type == AST_COMP)
+ lkb->lkb_castmode_done = castmode;
+ if (ret_type == AST_BAST)
+ lkb->lkb_bastmode_done = bastmode;
+ }
/* removes reference for the proc->asts lists added by
dlm_user_add_ast() and may result in the lkb being freed */
+
if (removed)
dlm_put_lkb(lkb);
+ /* the bast that was queued was eliminated (see unnecessary above),
+ leaving nothing to return */
+
+ if (!ret_type)
+ goto try_another;
+
return error;
}
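
The device_read() rework above records which AST was queued first in lkb_ast_first, delivers casts and basts in that order when both are pending, and skips a bast whose mode became compatible with the last delivered cast. A compressed userspace model of the selection; the mode-compatibility test is a stand-in, not the real dlm_modes_compat() table:

	#include <stdbool.h>
	#include <stdio.h>

	#define AST_COMP 1
	#define AST_BAST 2

	struct lkb {
		int ast_type;		/* bitmask of queued ASTs */
		int ast_first;		/* which was queued first */
		int castmode, bastmode;
		int castmode_done;	/* mode of last delivered cast */
	};

	static bool modes_compat(int a, int b)
	{
		return a <= b;		/* illustrative only */
	}

	static int pick_ast(struct lkb *l, int *mode)
	{
		int type = l->ast_first;

		if ((l->ast_type & AST_COMP) && (l->ast_type & AST_BAST))
			l->ast_first = (type == AST_COMP) ? AST_BAST
							  : AST_COMP;
		else
			l->ast_first = 0;
		l->ast_type &= ~type;
		*mode = (type == AST_COMP) ? l->castmode : l->bastmode;

		if (type == AST_COMP)
			l->castmode_done = *mode;
		/* a bast made unnecessary by the last grant is skipped */
		if (type == AST_BAST && modes_compat(*mode, l->castmode_done))
			return 0;
		return type;
	}

	int main(void)
	{
		struct lkb l = { AST_COMP | AST_BAST, AST_COMP, 5, 3, 0 };
		int mode;

		while (l.ast_type) {
			int t = pick_ast(&l, &mode);

			if (t)
				printf("deliver type %d mode %d\n", t, mode);
			else
				printf("bast dropped as unnecessary\n");
		}
		return 0;
	}
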
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index bd056a5..3817149 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1140,8 +1140,7 @@ retry:
* ep_poll_callback() when events will become available.
*/
init_waitqueue_entry(&wait, current);
- wait.flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue(&ep->wq, &wait);
+ __add_wait_queue_exclusive(&ep->wq, &wait);
for (;;) {
/*
diff --git a/fs/exec.c b/fs/exec.c
index 49cdaa1..e6e94c6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1387,8 +1387,6 @@ int do_execve(char * filename,
if (retval < 0)
goto out;
- current->stack_start = current->mm->start_stack;
-
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 5437327..22721b2 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -85,6 +85,7 @@ struct exofs_sb_info {
u32 s_next_generation; /* next gen # to use */
atomic_t s_curr_pending; /* number of pending commands */
uint8_t s_cred[OSD_CAP_LEN]; /* credential for the fscb */
+ struct backing_dev_info bdi; /* register our bdi with VFS */
struct pnfs_osd_data_map data_map; /* Default raid to use
* FIXME: Needed ?
@@ -93,7 +94,6 @@ struct exofs_sb_info {
struct exofs_layout layout; /* Default files layout,
* contains the variable osd_dev
* array. Keep last */
- struct backing_dev_info bdi;
struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */
};
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 3cf038c..e8766a3 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -1332,6 +1332,12 @@ retry_alloc:
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
+ * skip this group (and avoid loading bitmap) if there
+ * are no free blocks
+ */
+ if (!free_blocks)
+ continue;
+ /*
* skip this group if the number of
* free blocks is less than half of the reservation
* window size.
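
The balloc.c hunk above adds a cheap fast path: a group whose descriptor already shows zero free blocks is skipped before its on-disk bitmap is ever read. A toy version of the allocator's group scan:

	#include <stdio.h>

	struct group_desc { int free_blocks; };

	static int find_group_with_space(struct group_desc *g, int ngroups)
	{
		int i;

		for (i = 0; i < ngroups; i++) {
			if (!g[i].free_blocks)
				continue;	/* avoid loading the bitmap */
			/* ... read bitmap, try to allocate ... */
			return i;
		}
		return -1;
	}

	int main(void)
	{
		struct group_desc g[] = { {0}, {0}, {12} };

		printf("group %d\n", find_group_with_space(g, 3));
		return 0;
	}
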
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index ad7d572..f0c5286 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -106,7 +106,7 @@ void ext2_free_inode (struct inode * inode)
struct super_block * sb = inode->i_sb;
int is_directory;
unsigned long ino;
- struct buffer_head *bitmap_bh = NULL;
+ struct buffer_head *bitmap_bh;
unsigned long block_group;
unsigned long bit;
struct ext2_super_block * es;
@@ -135,14 +135,13 @@ void ext2_free_inode (struct inode * inode)
ino > le32_to_cpu(es->s_inodes_count)) {
ext2_error (sb, "ext2_free_inode",
"reserved or nonexistent inode %lu", ino);
- goto error_return;
+ return;
}
block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
- brelse(bitmap_bh);
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh)
- goto error_return;
+ return;
/* Ok, now we can actually update the inode bitmaps.. */
if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group),
@@ -154,7 +153,7 @@ void ext2_free_inode (struct inode * inode)
mark_buffer_dirty(bitmap_bh);
if (sb->s_flags & MS_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
-error_return:
+
brelse(bitmap_bh);
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fc13cc1..527c46d 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -22,7 +22,6 @@
* Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
*/
-#include <linux/smp_lock.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
@@ -1406,11 +1405,11 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
/* If this is the first large file
* created, add a flag to the superblock.
*/
- lock_kernel();
+ spin_lock(&EXT2_SB(sb)->s_lock);
ext2_update_dynamic_rev(sb);
EXT2_SET_RO_COMPAT_FEATURE(sb,
EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
- unlock_kernel();
+ spin_unlock(&EXT2_SB(sb)->s_lock);
ext2_write_super(sb);
}
}
@@ -1467,7 +1466,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
if (error)
return error;
- if (iattr->ia_valid & ATTR_SIZE)
+ if (is_quota_modification(inode, iattr))
dquot_initialize(inode);
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
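The same ATTR_SIZE-to-is_quota_modification switch recurs in the ext3 and ext4 hunks below. The helper (from include/linux/quotaops.h) is assumed to reduce to roughly:

static inline int is_quota_modification(struct inode *inode, struct iattr *ia)
{
	return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
		(ia->ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
		(ia->ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid);
}

so dquot_initialize() now also runs for chown-style changes, not just truncates.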
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 42e4a30..71e9eb1 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -26,7 +26,6 @@
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
-#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
@@ -39,7 +38,7 @@
#include "xip.h"
static void ext2_sync_super(struct super_block *sb,
- struct ext2_super_block *es);
+ struct ext2_super_block *es, int wait);
static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
@@ -52,9 +51,11 @@ void ext2_error (struct super_block * sb, const char * function,
struct ext2_super_block *es = sbi->s_es;
if (!(sb->s_flags & MS_RDONLY)) {
+ spin_lock(&sbi->s_lock);
sbi->s_mount_state |= EXT2_ERROR_FS;
es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
- ext2_sync_super(sb, es);
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, 1);
}
va_start(args, fmt);
@@ -84,6 +85,9 @@ void ext2_msg(struct super_block *sb, const char *prefix,
va_end(args);
}
+/*
+ * This must be called with sbi->s_lock held.
+ */
void ext2_update_dynamic_rev(struct super_block *sb)
{
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
@@ -115,8 +119,6 @@ static void ext2_put_super (struct super_block * sb)
int i;
struct ext2_sb_info *sbi = EXT2_SB(sb);
- lock_kernel();
-
if (sb->s_dirt)
ext2_write_super(sb);
@@ -124,8 +126,10 @@ static void ext2_put_super (struct super_block * sb)
if (!(sb->s_flags & MS_RDONLY)) {
struct ext2_super_block *es = sbi->s_es;
+ spin_lock(&sbi->s_lock);
es->s_state = cpu_to_le16(sbi->s_mount_state);
- ext2_sync_super(sb, es);
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, 1);
}
db_count = sbi->s_gdb_count;
for (i = 0; i < db_count; i++)
@@ -140,8 +144,6 @@ static void ext2_put_super (struct super_block * sb)
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
-
- unlock_kernel();
}
static struct kmem_cache * ext2_inode_cachep;
@@ -209,6 +211,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
struct ext2_super_block *es = sbi->s_es;
unsigned long def_mount_opts;
+ spin_lock(&sbi->s_lock);
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (sbi->s_sb_block != 1)
@@ -281,6 +284,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (!test_opt(sb, RESERVATION))
seq_puts(seq, ",noreservation");
+ spin_unlock(&sbi->s_lock);
return 0;
}
@@ -606,7 +610,6 @@ static int ext2_setup_super (struct super_block * sb,
if (!le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
- ext2_write_super(sb);
if (test_opt (sb, DEBUG))
ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
"bpg=%lu, ipg=%lu, mo=%04lx]",
@@ -767,6 +770,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
+ spin_lock_init(&sbi->s_lock);
+
/*
* See what the current blocksize for the device is, and
* use that as the blocksize. Otherwise (or if the blocksize
@@ -1079,7 +1084,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
- ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+ if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
+ sb->s_flags |= MS_RDONLY;
+ ext2_write_super(sb);
return 0;
cantfind_ext2:
@@ -1120,30 +1127,26 @@ static void ext2_clear_super_error(struct super_block *sb)
* be remapped. Nothing we can do but to retry the
* write and hope for the best.
*/
- printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
- "superblock detected", sb->s_id);
+ ext2_msg(sb, KERN_ERR,
+ "previous I/O error to superblock detected\n");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
}
-static void ext2_commit_super (struct super_block * sb,
- struct ext2_super_block * es)
-{
- ext2_clear_super_error(sb);
- es->s_wtime = cpu_to_le32(get_seconds());
- mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
- sb->s_dirt = 0;
-}
-
-static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
+static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
+ int wait)
{
ext2_clear_super_error(sb);
+ spin_lock(&EXT2_SB(sb)->s_lock);
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
es->s_wtime = cpu_to_le32(get_seconds());
+ /* unlock before we do IO */
+ spin_unlock(&EXT2_SB(sb)->s_lock);
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
- sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
+ if (wait)
+ sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
}
@@ -1157,43 +1160,18 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
* may have been checked while mounted and e2fsck may have
* set s_state to EXT2_VALID_FS after some corrections.
*/
-
static int ext2_sync_fs(struct super_block *sb, int wait)
{
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
- struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
-
- lock_kernel();
- if (buffer_write_io_error(sbh)) {
- /*
- * Oh, dear. A previous attempt to write the
- * superblock failed. This could happen because the
- * USB device was yanked out. Or it could happen to
- * be a transient write error and maybe the block will
- * be remapped. Nothing we can do but to retry the
- * write and hope for the best.
- */
- ext2_msg(sb, KERN_ERR,
- "previous I/O error to superblock detected\n");
- clear_buffer_write_io_error(sbh);
- set_buffer_uptodate(sbh);
- }
+ spin_lock(&sbi->s_lock);
if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
ext2_debug("setting valid to 0\n");
es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
- es->s_free_blocks_count =
- cpu_to_le32(ext2_count_free_blocks(sb));
- es->s_free_inodes_count =
- cpu_to_le32(ext2_count_free_inodes(sb));
- es->s_mtime = cpu_to_le32(get_seconds());
- ext2_sync_super(sb, es);
- } else {
- ext2_commit_super(sb, es);
}
- sb->s_dirt = 0;
- unlock_kernel();
-
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, wait);
return 0;
}
@@ -1215,7 +1193,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
unsigned long old_sb_flags;
int err;
- lock_kernel();
+ spin_lock(&sbi->s_lock);
/* Store the old options */
old_sb_flags = sb->s_flags;
@@ -1254,13 +1232,13 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
}
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
- unlock_kernel();
+ spin_unlock(&sbi->s_lock);
return 0;
}
if (*flags & MS_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS)) {
- unlock_kernel();
+ spin_unlock(&sbi->s_lock);
return 0;
}
/*
@@ -1269,6 +1247,8 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
*/
es->s_state = cpu_to_le16(sbi->s_mount_state);
es->s_mtime = cpu_to_le32(get_seconds());
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, 1);
} else {
__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
~EXT2_FEATURE_RO_COMPAT_SUPP);
@@ -1288,16 +1268,16 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext2_setup_super (sb, es, 0))
sb->s_flags &= ~MS_RDONLY;
+ spin_unlock(&sbi->s_lock);
+ ext2_write_super(sb);
}
- ext2_sync_super(sb, es);
- unlock_kernel();
return 0;
restore_opts:
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_resuid = old_opts.s_resuid;
sbi->s_resgid = old_opts.s_resgid;
sb->s_flags = old_sb_flags;
- unlock_kernel();
+ spin_unlock(&sbi->s_lock);
return err;
}
@@ -1308,6 +1288,8 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
struct ext2_super_block *es = sbi->s_es;
u64 fsid;
+ spin_lock(&sbi->s_lock);
+
if (test_opt (sb, MINIX_DF))
sbi->s_overhead_last = 0;
else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
@@ -1362,6 +1344,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
le64_to_cpup((void *)es->s_uuid + sizeof(u64));
buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
+ spin_unlock(&sbi->s_lock);
return 0;
}
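The rule running through the ext2 hunks: in-core superblock fields are mutated under the new sbi->s_lock, and the lock is dropped before any buffer I/O. A minimal sketch of the pattern (hypothetical helper, not part of the patch):

static void ext2_set_state_bits(struct super_block *sb, __le16 bits)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	spin_lock(&sbi->s_lock);
	sbi->s_es->s_state |= bits;	/* in-core update: locked */
	spin_unlock(&sbi->s_lock);
	mark_buffer_dirty(sbi->s_sbh);	/* I/O: outside the lock */
}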
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index e44dc92..3b96045 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -345,7 +345,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
return;
+ spin_lock(&EXT2_SB(sb)->s_lock);
EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
+ spin_unlock(&EXT2_SB(sb)->s_lock);
sb->s_dirt = 1;
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a177122..4a32511 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1584,6 +1584,12 @@ retry_alloc:
goto io_error;
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
+ * skip this group (and avoid loading bitmap) if there
+ * are no free blocks
+ */
+ if (!free_blocks)
+ continue;
+ /*
* skip this group if the number of
* free blocks is less than half of the reservation
* window size.
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 9492f60..fcf7487 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -48,7 +48,7 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
struct inode *inode = dentry->d_inode;
struct ext3_inode_info *ei = EXT3_I(inode);
journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
- int ret = 0;
+ int ret, needs_barrier = 0;
tid_t commit_tid;
if (inode->i_sb->s_flags & MS_RDONLY)
@@ -70,29 +70,27 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
- if (ext3_should_journal_data(inode)) {
- ret = ext3_force_commit(inode->i_sb);
- goto out;
- }
+ if (ext3_should_journal_data(inode))
+ return ext3_force_commit(inode->i_sb);
if (datasync)
commit_tid = atomic_read(&ei->i_datasync_tid);
else
commit_tid = atomic_read(&ei->i_sync_tid);
- if (log_start_commit(journal, commit_tid)) {
- log_wait_commit(journal, commit_tid);
- goto out;
- }
+ if (test_opt(inode->i_sb, BARRIER) &&
+ !journal_trans_will_send_data_barrier(journal, commit_tid))
+ needs_barrier = 1;
+ log_start_commit(journal, commit_tid);
+ ret = log_wait_commit(journal, commit_tid);
/*
* In case we didn't commit a transaction, we have to flush
* disk caches manually so that data really is on persistent
* storage
*/
- if (test_opt(inode->i_sb, BARRIER))
+ if (needs_barrier)
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
BLKDEV_IFL_WAIT);
-out:
return ret;
}
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ea33bdf..735f019 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3151,7 +3151,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
- if (ia_valid & ATTR_SIZE)
+ if (is_quota_modification(inode, attr))
dquot_initialize(inode);
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 1bee604..0fc1293 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -653,8 +653,12 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
- if (test_opt(sb, BARRIER))
- seq_puts(seq, ",barrier=1");
+
+ /*
+ * Always display barrier state so it's clear what the status is.
+ */
+ seq_puts(seq, ",barrier=");
+ seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
@@ -810,8 +814,8 @@ enum {
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
- Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- Opt_usrquota, Opt_grpquota
+ Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
+ Opt_resize, Opt_usrquota, Opt_grpquota
};
static const match_table_t tokens = {
@@ -865,6 +869,8 @@ static const match_table_t tokens = {
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
+ {Opt_barrier, "barrier"},
+ {Opt_nobarrier, "nobarrier"},
{Opt_resize, "resize"},
{Opt_err, NULL},
};
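Two table entries route both spellings to Opt_barrier; together with the args[0] reset in the parse_options() hunk below, the handler can tell a bare "barrier" from "barrier=%u". A standalone sketch of the optional-argument pattern (hypothetical helper; relies on args[0].from staying NULL when the matched pattern captured nothing):

static int parse_barrier_arg(substring_t *args, int *enable)
{
	int option;

	if (args[0].from) {		/* "barrier=%u" matched */
		if (match_int(&args[0], &option))
			return -EINVAL;
		*enable = !!option;
	} else {			/* bare "barrier" */
		*enable = 1;
	}
	return 0;
}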
@@ -967,7 +973,11 @@ static int parse_options (char *options, struct super_block *sb,
int token;
if (!*p)
continue;
-
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@@ -1215,9 +1225,15 @@ set_qf_format:
case Opt_abort:
set_opt(sbi->s_mount_opt, ABORT);
break;
+ case Opt_nobarrier:
+ clear_opt(sbi->s_mount_opt, BARRIER);
+ break;
case Opt_barrier:
- if (match_int(&args[0], &option))
- return 0;
+ if (args[0].from) {
+ if (match_int(&args[0], &option))
+ return 0;
+ } else
+ option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
@@ -1890,21 +1906,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
- err = percpu_counter_init(&sbi->s_freeblocks_counter,
- ext3_count_free_blocks(sb));
- if (!err) {
- err = percpu_counter_init(&sbi->s_freeinodes_counter,
- ext3_count_free_inodes(sb));
- }
- if (!err) {
- err = percpu_counter_init(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
- }
- if (err) {
- ext3_msg(sb, KERN_ERR, "error: insufficient memory");
- goto failed_mount3;
- }
-
/* per filesystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
@@ -1945,15 +1946,29 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (!test_opt(sb, NOLOAD) &&
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext3_load_journal(sb, es, journal_devnum))
- goto failed_mount3;
+ goto failed_mount2;
} else if (journal_inum) {
if (ext3_create_journal(sb, es, journal_inum))
- goto failed_mount3;
+ goto failed_mount2;
} else {
if (!silent)
ext3_msg(sb, KERN_ERR,
"error: no journal found. "
"mounting ext3 over ext2?");
+ goto failed_mount2;
+ }
+ err = percpu_counter_init(&sbi->s_freeblocks_counter,
+ ext3_count_free_blocks(sb));
+ if (!err) {
+ err = percpu_counter_init(&sbi->s_freeinodes_counter,
+ ext3_count_free_inodes(sb));
+ }
+ if (!err) {
+ err = percpu_counter_init(&sbi->s_dirs_counter,
+ ext3_count_dirs(sb));
+ }
+ if (err) {
+ ext3_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
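Moving the percpu counter setup after journal initialization also renumbers the failure labels so that teardown stays in strict reverse order of setup. The shape being preserved (sketch, hypothetical names):

static int fill_super_shape(void)
{
	int err;

	err = load_journal();			/* moved before counters */
	if (err)
		goto fail_groups;		/* == failed_mount2 */
	err = init_counters();
	if (err)
		goto fail_journal_and_counters;	/* == failed_mount3 */
	return 0;

fail_journal_and_counters:
	destroy_counters();
	destroy_journal();
fail_groups:
	release_group_descriptors();
	return err;
}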
@@ -1978,7 +1993,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
ext3_msg(sb, KERN_ERR,
"error: journal does not support "
"requested data journaling mode");
- goto failed_mount4;
+ goto failed_mount3;
}
default:
break;
@@ -2001,19 +2016,19 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (IS_ERR(root)) {
ext3_msg(sb, KERN_ERR, "error: get root inode failed");
ret = PTR_ERR(root);
- goto failed_mount4;
+ goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
- goto failed_mount4;
+ goto failed_mount3;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
iput(root);
ret = -ENOMEM;
- goto failed_mount4;
+ goto failed_mount3;
}
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -2039,12 +2054,11 @@ cantfind_ext3:
sb->s_id);
goto failed_mount;
-failed_mount4:
- journal_destroy(sbi->s_journal);
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
+ journal_destroy(sbi->s_journal);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -2278,6 +2292,9 @@ static int ext3_load_journal(struct super_block *sb,
return -EINVAL;
}
+ if (!(journal->j_flags & JFS_BARRIER))
+ printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
+
if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
err = journal_update_format(journal);
if (err) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 81d6054..3e0f6af 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5425,7 +5425,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
- if (ia_valid & ATTR_SIZE)
+ if (is_quota_modification(inode, attr))
dquot_initialize(inode);
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index bcba9603..f74d270 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -619,9 +619,15 @@ int send_sigurg(struct fown_struct *fown)
return ret;
}
-static DEFINE_RWLOCK(fasync_lock);
+static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
+static void fasync_free_rcu(struct rcu_head *head)
+{
+ kmem_cache_free(fasync_cache,
+ container_of(head, struct fasync_struct, fa_rcu));
+}
+
/*
* Remove a fasync entry. If successfully removed, return
* positive and clear the FASYNC flag. If no entry exists,
@@ -630,8 +636,6 @@ static struct kmem_cache *fasync_cache __read_mostly;
* NOTE! It is very important that the FASYNC flag always
* match the state "is the filp on a fasync list".
*
- * We always take the 'filp->f_lock', in since fasync_lock
- * needs to be irq-safe.
*/
static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
@@ -639,17 +643,22 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
int result = 0;
spin_lock(&filp->f_lock);
- write_lock_irq(&fasync_lock);
+ spin_lock(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
if (fa->fa_file != filp)
continue;
+
+ spin_lock_irq(&fa->fa_lock);
+ fa->fa_file = NULL;
+ spin_unlock_irq(&fa->fa_lock);
+
*fp = fa->fa_next;
- kmem_cache_free(fasync_cache, fa);
+ call_rcu(&fa->fa_rcu, fasync_free_rcu);
filp->f_flags &= ~FASYNC;
result = 1;
break;
}
- write_unlock_irq(&fasync_lock);
+ spin_unlock(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
@@ -671,25 +680,30 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
return -ENOMEM;
spin_lock(&filp->f_lock);
- write_lock_irq(&fasync_lock);
+ spin_lock(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
if (fa->fa_file != filp)
continue;
+
+ spin_lock_irq(&fa->fa_lock);
fa->fa_fd = fd;
+ spin_unlock_irq(&fa->fa_lock);
+
kmem_cache_free(fasync_cache, new);
goto out;
}
+ spin_lock_init(&new->fa_lock);
new->magic = FASYNC_MAGIC;
new->fa_file = filp;
new->fa_fd = fd;
new->fa_next = *fapp;
- *fapp = new;
+ rcu_assign_pointer(*fapp, new);
result = 1;
filp->f_flags |= FASYNC;
out:
- write_unlock_irq(&fasync_lock);
+ spin_unlock(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
@@ -709,37 +723,41 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap
EXPORT_SYMBOL(fasync_helper);
-void __kill_fasync(struct fasync_struct *fa, int sig, int band)
+/*
+ * rcu_read_lock() is held
+ */
+static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
while (fa) {
- struct fown_struct * fown;
+ struct fown_struct *fown;
if (fa->magic != FASYNC_MAGIC) {
printk(KERN_ERR "kill_fasync: bad magic number in "
"fasync_struct!\n");
return;
}
- fown = &fa->fa_file->f_owner;
- /* Don't send SIGURG to processes which have not set a
- queued signum: SIGURG has its own default signalling
- mechanism. */
- if (!(sig == SIGURG && fown->signum == 0))
- send_sigio(fown, fa->fa_fd, band);
- fa = fa->fa_next;
+ spin_lock(&fa->fa_lock);
+ if (fa->fa_file) {
+ fown = &fa->fa_file->f_owner;
+ /* Don't send SIGURG to processes which have not set a
+ queued signum: SIGURG has its own default signalling
+ mechanism. */
+ if (!(sig == SIGURG && fown->signum == 0))
+ send_sigio(fown, fa->fa_fd, band);
+ }
+ spin_unlock(&fa->fa_lock);
+ fa = rcu_dereference(fa->fa_next);
}
}
-EXPORT_SYMBOL(__kill_fasync);
-
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
/* First a quick test without locking: usually
* the list is empty.
*/
if (*fp) {
- read_lock(&fasync_lock);
- /* reread *fp after obtaining the lock */
- __kill_fasync(*fp, sig, band);
- read_unlock(&fasync_lock);
+ rcu_read_lock();
+ kill_fasync_rcu(rcu_dereference(*fp), sig, band);
+ rcu_read_unlock();
}
}
EXPORT_SYMBOL(kill_fasync);
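The conversion's contract: writers serialize on the fasync_lock spinlock and unlink entries with fa_file set to NULL before call_rcu() frees them, while readers walk the list with no lock at all. A minimal read-side sketch using this patch's struct layout (hypothetical function; the unlocked fa_file read is a racy snapshot, which is fine for a sketch):

static int count_fasync_entries(struct fasync_struct **fp)
{
	struct fasync_struct *fa;
	int n = 0;

	rcu_read_lock();
	for (fa = rcu_dereference(*fp); fa;
	     fa = rcu_dereference(fa->fa_next))
		if (fa->fa_file)	/* NULL: unlinked, free pending */
			n++;
	rcu_read_unlock();
	return n;
}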
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 0c1d0b8..a739a0a 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -418,6 +418,7 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
struct buffer_head *dibh;
+ u64 dsize = i_size_read(&ip->i_inode);
void *kaddr;
int error;
@@ -437,9 +438,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
return error;
kaddr = kmap_atomic(page, KM_USER0);
- memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
- ip->i_disksize);
- memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);
+ if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
+ dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
+ memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
brelse(dibh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 5e411d5..4a48c0f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -71,11 +71,13 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
if (!PageUptodate(page)) {
void *kaddr = kmap(page);
+ u64 dsize = i_size_read(inode);
+
+ if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
+ dsize = dibh->b_size - sizeof(struct gfs2_dinode);
- memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
- ip->i_disksize);
- memset(kaddr + ip->i_disksize, 0,
- PAGE_CACHE_SIZE - ip->i_disksize);
+ memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
kunmap(page);
SetPageUptodate(page);
@@ -1038,13 +1040,14 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
goto out;
if (gfs2_is_stuffed(ip)) {
- ip->i_disksize = size;
+ u64 dsize = size + sizeof(struct gfs2_dinode);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
- gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
+ if (dsize > dibh->b_size)
+ dsize = dibh->b_size;
+ gfs2_buffer_clear_tail(dibh, dsize);
error = 1;
-
} else {
if (size & (u64)(sdp->sd_sb.sb_bsize - 1))
error = gfs2_block_truncate_page(ip->i_inode.i_mapping);
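This clamp, like the one in the aops.c hunk above, bounds the stuffed-data copy by what actually fits after the on-disk dinode header; an assumed-equivalent spelling using min_t():

	u64 dsize = min_t(u64, i_size_read(inode),
			  dibh->b_size - sizeof(struct gfs2_dinode));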
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 25fddc1..8295c5b 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1475,7 +1475,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
- be64_to_cpu(dent->de_inum.no_formal_ino), 0);
+ be64_to_cpu(dent->de_inum.no_formal_ino));
brelse(bh);
return inode;
}
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index c22c211..dfe237a 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -168,7 +168,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
if (error)
goto fail;
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0, 0);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0);
if (IS_ERR(inode)) {
error = PTR_ERR(inode);
goto fail;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 454d4b4..ddcdbf4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -855,6 +855,9 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
gh->gh_flags = flags;
gh->gh_iflags = 0;
gh->gh_ip = (unsigned long)__builtin_return_address(0);
+ if (gh->gh_owner_pid)
+ put_pid(gh->gh_owner_pid);
+ gh->gh_owner_pid = get_pid(task_pid(current));
}
/**
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 3aac46f..b5d7363 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -439,9 +439,6 @@ struct gfs2_args {
struct gfs2_tune {
spinlock_t gt_spin;
- unsigned int gt_incore_log_blocks;
- unsigned int gt_log_flush_secs;
-
unsigned int gt_logd_secs;
unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
@@ -462,6 +459,7 @@ enum {
SDF_SHUTDOWN = 2,
SDF_NOBARRIERS = 3,
SDF_NORECOVERY = 4,
+ SDF_DEMOTE = 5,
};
#define GFS2_FSNAME_LEN 256
@@ -618,6 +616,7 @@ struct gfs2_sbd {
unsigned int sd_log_commited_databuf;
int sd_log_commited_revoke;
+ atomic_t sd_log_pinned;
unsigned int sd_log_num_buf;
unsigned int sd_log_num_revoke;
unsigned int sd_log_num_rg;
@@ -629,15 +628,17 @@ struct gfs2_sbd {
struct list_head sd_log_le_databuf;
struct list_head sd_log_le_ordered;
+ atomic_t sd_log_thresh1;
+ atomic_t sd_log_thresh2;
atomic_t sd_log_blks_free;
- struct mutex sd_log_reserve_mutex;
+ wait_queue_head_t sd_log_waitq;
+ wait_queue_head_t sd_logd_waitq;
u64 sd_log_sequence;
unsigned int sd_log_head;
unsigned int sd_log_tail;
int sd_log_idle;
- unsigned long sd_log_flush_time;
struct rw_semaphore sd_log_flush_lock;
atomic_t sd_log_in_flight;
wait_queue_head_t sd_log_flush_wait;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index b1bf269..51d8061 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -158,7 +158,6 @@ void gfs2_set_iop(struct inode *inode)
* @sb: The super block
* @no_addr: The inode number
* @type: The type of the inode
- * @skip_freeing: set this not return an inode if it is currently being freed.
*
* Returns: A VFS inode, or an error
*/
@@ -166,17 +165,14 @@ void gfs2_set_iop(struct inode *inode)
struct inode *gfs2_inode_lookup(struct super_block *sb,
unsigned int type,
u64 no_addr,
- u64 no_formal_ino, int skip_freeing)
+ u64 no_formal_ino)
{
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *io_gl;
int error;
- if (skip_freeing)
- inode = gfs2_iget_skip(sb, no_addr);
- else
- inode = gfs2_iget(sb, no_addr);
+ inode = gfs2_iget(sb, no_addr);
ip = GFS2_I(inode);
if (!inode)
@@ -234,13 +230,100 @@ fail_glock:
fail_iopen:
gfs2_glock_put(io_gl);
fail_put:
- ip->i_gl->gl_object = NULL;
+ if (inode->i_state & I_NEW)
+ ip->i_gl->gl_object = NULL;
gfs2_glock_put(ip->i_gl);
fail:
- iget_failed(inode);
+ if (inode->i_state & I_NEW)
+ iget_failed(inode);
+ else
+ iput(inode);
return ERR_PTR(error);
}
+/**
+ * gfs2_unlinked_inode_lookup - Lookup an unlinked inode for reclamation
+ * @sb: The super block
+ * no_addr: The inode number
+ * @@inode: A pointer to the inode found, if any
+ *
+ * Returns: 0 and *inode if no errors occurred. If an error occurs,
+ * the resulting *inode may or may not be NULL.
+ */
+
+int gfs2_unlinked_inode_lookup(struct super_block *sb, u64 no_addr,
+ struct inode **inode)
+{
+ struct gfs2_sbd *sdp;
+ struct gfs2_inode *ip;
+ struct gfs2_glock *io_gl;
+ int error;
+ struct gfs2_holder gh;
+
+ *inode = gfs2_iget_skip(sb, no_addr);
+
+ if (!(*inode))
+ return -ENOBUFS;
+
+ if (!((*inode)->i_state & I_NEW))
+ return -ENOBUFS;
+
+ ip = GFS2_I(*inode);
+ sdp = GFS2_SB(*inode);
+ ip->i_no_formal_ino = -1;
+
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
+ if (unlikely(error))
+ goto fail;
+ ip->i_gl->gl_object = ip;
+
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ if (unlikely(error))
+ goto fail_put;
+
+ set_bit(GIF_INVALID, &ip->i_flags);
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT,
+ &ip->i_iopen_gh);
+ if (unlikely(error)) {
+ if (error == GLR_TRYFAILED)
+ error = 0;
+ goto fail_iopen;
+ }
+ ip->i_iopen_gh.gh_gl->gl_object = ip;
+ gfs2_glock_put(io_gl);
+
+ (*inode)->i_mode = DT2IF(DT_UNKNOWN);
+
+ /*
+ * We must read the inode in order to work out its type in
+ * this case. Note that this doesn't happen often as we normally
+ * know the type beforehand. This code path only occurs during
+ * unlinked inode recovery (where it is safe to take this glock,
+ * which is not true in the general case).
+ */
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY,
+ &gh);
+ if (unlikely(error)) {
+ if (error == GLR_TRYFAILED)
+ error = 0;
+ goto fail_glock;
+ }
+ /* Inode is now uptodate */
+ gfs2_glock_dq_uninit(&gh);
+ gfs2_set_iop(*inode);
+
+ return 0;
+fail_glock:
+ gfs2_glock_dq(&ip->i_iopen_gh);
+fail_iopen:
+ gfs2_glock_put(io_gl);
+fail_put:
+ ip->i_gl->gl_object = NULL;
+ gfs2_glock_put(ip->i_gl);
+fail:
+ return error;
+}
+
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
const struct gfs2_dinode *str = buf;
@@ -862,7 +945,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
goto fail_gunlock2;
inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
- inum.no_formal_ino, 0);
+ inum.no_formal_ino);
if (IS_ERR(inode))
goto fail_gunlock2;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index c341aaf..e161461 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -83,8 +83,9 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
extern void gfs2_set_iop(struct inode *inode);
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino,
- int skip_freeing);
+ u64 no_addr, u64 no_formal_ino);
+extern int gfs2_unlinked_inode_lookup(struct super_block *sb, u64 no_addr,
+ struct inode **inode);
extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
extern int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e5bf4b5..b593f0e 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -168,12 +168,11 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
return list_empty(&ai->ai_ail1_list);
}
-static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
+static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
struct list_head *head;
u64 sync_gen;
- struct list_head *first;
- struct gfs2_ail *first_ai, *ai, *tmp;
+ struct gfs2_ail *ai;
int done = 0;
gfs2_log_lock(sdp);
@@ -184,21 +183,9 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
}
sync_gen = sdp->sd_ail_sync_gen++;
- first = head->prev;
- first_ai = list_entry(first, struct gfs2_ail, ai_list);
- first_ai->ai_sync_gen = sync_gen;
- gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */
-
- if (flags & DIO_ALL)
- first = NULL;
-
while(!done) {
- if (first && (head->prev != first ||
- gfs2_ail1_empty_one(sdp, first_ai, 0)))
- break;
-
done = 1;
- list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
+ list_for_each_entry_reverse(ai, head, ai_list) {
if (ai->ai_sync_gen >= sync_gen)
continue;
ai->ai_sync_gen = sync_gen;
@@ -290,58 +277,57 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
* flush time, so we ensure that we have just enough free blocks at all
* times to avoid running out during a log flush.
*
+ * We no longer flush the log here; instead we wake up logd to do that
+ * for us. To avoid the thundering herd and to ensure that we deal fairly
+ * with queued waiters, we use an exclusive wait. This means that when we
+ * get woken with enough journal space to get our reservation, we need to
+ * wake the next waiter on the list.
+ *
* Returns: errno
*/
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
- unsigned int try = 0;
unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
+ unsigned wanted = blks + reserved_blks;
+ DEFINE_WAIT(wait);
+ int did_wait = 0;
+ unsigned int free_blocks;
if (gfs2_assert_warn(sdp, blks) ||
gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
return -EINVAL;
-
- mutex_lock(&sdp->sd_log_reserve_mutex);
- gfs2_log_lock(sdp);
- while(atomic_read(&sdp->sd_log_blks_free) <= (blks + reserved_blks)) {
- gfs2_log_unlock(sdp);
- gfs2_ail1_empty(sdp, 0);
- gfs2_log_flush(sdp, NULL);
-
- if (try++)
- gfs2_ail1_start(sdp, 0);
- gfs2_log_lock(sdp);
+retry:
+ free_blocks = atomic_read(&sdp->sd_log_blks_free);
+ if (unlikely(free_blocks <= wanted)) {
+ do {
+ prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ wake_up(&sdp->sd_logd_waitq);
+ did_wait = 1;
+ if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
+ io_schedule();
+ free_blocks = atomic_read(&sdp->sd_log_blks_free);
+ } while(free_blocks <= wanted);
+ finish_wait(&sdp->sd_log_waitq, &wait);
}
- atomic_sub(blks, &sdp->sd_log_blks_free);
+ if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
+ free_blocks - blks) != free_blocks)
+ goto retry;
trace_gfs2_log_blocks(sdp, -blks);
- gfs2_log_unlock(sdp);
- mutex_unlock(&sdp->sd_log_reserve_mutex);
+
+ /*
+ * If we waited, then so might others, wake them up _after_ we get
+ * our share of the log.
+ */
+ if (unlikely(did_wait))
+ wake_up(&sdp->sd_log_waitq);
down_read(&sdp->sd_log_flush_lock);
return 0;
}
-/**
- * gfs2_log_release - Release a given number of log blocks
- * @sdp: The GFS2 superblock
- * @blks: The number of blocks
- *
- */
-
-void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
-{
-
- gfs2_log_lock(sdp);
- atomic_add(blks, &sdp->sd_log_blks_free);
- trace_gfs2_log_blocks(sdp, blks);
- gfs2_assert_withdraw(sdp,
- atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
- gfs2_log_unlock(sdp);
- up_read(&sdp->sd_log_flush_lock);
-}
-
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
struct gfs2_journal_extent *je;
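The reservation above is now lockless: read the free-block count, then commit the subtraction only if the count is still unchanged, otherwise retry. The generic shape of the loop (sketch, hypothetical helper):

static int try_reserve(atomic_t *free, unsigned int blks, unsigned int floor)
{
	unsigned int v = atomic_read(free);

	if (v <= blks + floor)
		return -EBUSY;		/* caller sleeps on sd_log_waitq */
	if (atomic_cmpxchg(free, v, v - blks) != v)
		return -EAGAIN;		/* lost a race: reread and retry */
	return 0;
}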
@@ -559,11 +545,10 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
ail2_empty(sdp, new_tail);
- gfs2_log_lock(sdp);
atomic_add(dist, &sdp->sd_log_blks_free);
trace_gfs2_log_blocks(sdp, dist);
- gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
- gfs2_log_unlock(sdp);
+ gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
+ sdp->sd_jdesc->jd_blocks);
sdp->sd_log_tail = new_tail;
}
@@ -615,6 +600,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
if (buffer_eopnotsupp(bh)) {
clear_buffer_eopnotsupp(bh);
set_buffer_uptodate(bh);
+ fs_info(sdp, "barrier sync failed - disabling barriers\n");
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
lock_buffer(bh);
skip_barrier:
@@ -822,6 +808,13 @@ static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
* @sdp: the filesystem
* @tr: the transaction
*
+ * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
+ * or the total number of used blocks (pinned blocks plus AIL blocks)
+ * is greater than thresh2.
+ *
+ * At mount time thresh1 is set to 2/5ths of the journal size and thresh2
+ * to 4/5ths (see init_journal()).
+ *
* Returns: errno
*/
@@ -832,10 +825,10 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
up_read(&sdp->sd_log_flush_lock);
- gfs2_log_lock(sdp);
- if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
- wake_up_process(sdp->sd_logd_process);
- gfs2_log_unlock(sdp);
+ if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
+ ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
+ atomic_read(&sdp->sd_log_thresh2)))
+ wake_up(&sdp->sd_logd_waitq);
}
/**
@@ -882,13 +875,23 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
gfs2_log_flush(sdp, NULL);
for (;;) {
- gfs2_ail1_start(sdp, DIO_ALL);
+ gfs2_ail1_start(sdp);
if (gfs2_ail1_empty(sdp, DIO_ALL))
break;
msleep(10);
}
}
+static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
+{
+ return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
+}
+
+static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+{
+ unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
+ return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
+}
/**
* gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
@@ -901,28 +904,43 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
int gfs2_logd(void *data)
{
struct gfs2_sbd *sdp = data;
- unsigned long t;
- int need_flush;
+ unsigned long t = 1;
+ DEFINE_WAIT(wait);
+ unsigned preflush;
while (!kthread_should_stop()) {
- /* Advance the log tail */
- t = sdp->sd_log_flush_time +
- gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
+ preflush = atomic_read(&sdp->sd_log_pinned);
+ if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
+ gfs2_ail1_empty(sdp, DIO_ALL);
+ gfs2_log_flush(sdp, NULL);
+ gfs2_ail1_empty(sdp, DIO_ALL);
+ }
- gfs2_ail1_empty(sdp, DIO_ALL);
- gfs2_log_lock(sdp);
- need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
- gfs2_log_unlock(sdp);
- if (need_flush || time_after_eq(jiffies, t)) {
+ if (gfs2_ail_flush_reqd(sdp)) {
+ gfs2_ail1_start(sdp);
+ io_schedule();
+ gfs2_ail1_empty(sdp, 0);
gfs2_log_flush(sdp, NULL);
- sdp->sd_log_flush_time = jiffies;
+ gfs2_ail1_empty(sdp, DIO_ALL);
}
+ wake_up(&sdp->sd_log_waitq);
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
if (freezing(current))
refrigerator();
- schedule_timeout_interruptible(t);
+
+ do {
+ prepare_to_wait(&sdp->sd_logd_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!gfs2_ail_flush_reqd(sdp) &&
+ !gfs2_jrnl_flush_reqd(sdp) &&
+ !kthread_should_stop())
+ t = schedule_timeout(t);
+ } while(t && !gfs2_ail_flush_reqd(sdp) &&
+ !gfs2_jrnl_flush_reqd(sdp) &&
+ !kthread_should_stop());
+ finish_wait(&sdp->sd_logd_waitq, &wait);
}
return 0;
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 7c64510..eb570b4 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -51,7 +51,6 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
unsigned int ssize);
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
-void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
void gfs2_log_incr_head(struct gfs2_sbd *sdp);
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index adc260f..bf33f82 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -54,6 +54,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (bd->bd_ail)
list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
get_bh(bh);
+ atomic_inc(&sdp->sd_log_pinned);
trace_gfs2_pin(bd, 1);
}
@@ -94,6 +95,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
trace_gfs2_pin(bd, 0);
gfs2_log_unlock(sdp);
unlock_buffer(bh);
+ atomic_dec(&sdp->sd_log_pinned);
}
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index a88fadc..fb2a5f9 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -94,7 +94,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_glock_cachep)
goto fail;
- gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)",
+ gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
sizeof(struct gfs2_glock) +
sizeof(struct address_space),
0, 0, gfs2_init_gl_aspace_once);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 0bb12c8..18176d0 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -34,7 +34,6 @@
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
- int err;
struct buffer_head *bh, *head;
int nr_underway = 0;
int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
@@ -86,11 +85,10 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
} while (bh != head);
unlock_page(page);
- err = 0;
if (nr_underway == 0)
end_page_writeback(page);
- return err;
+ return 0;
}
const struct address_space_operations gfs2_meta_aops = {
@@ -313,6 +311,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
struct gfs2_bufdata *bd = bh->b_private;
if (test_clear_buffer_pinned(bh)) {
+ atomic_dec(&sdp->sd_log_pinned);
list_del_init(&bd->bd_le.le_list);
if (meta) {
gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c1309ed..3593b3a 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -57,8 +57,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
{
spin_lock_init(&gt->gt_spin);
- gt->gt_incore_log_blocks = 1024;
- gt->gt_logd_secs = 1;
gt->gt_quota_simul_sync = 64;
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
@@ -101,14 +99,15 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
spin_lock_init(&sdp->sd_trunc_lock);
spin_lock_init(&sdp->sd_log_lock);
-
+ atomic_set(&sdp->sd_log_pinned, 0);
INIT_LIST_HEAD(&sdp->sd_log_le_buf);
INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
INIT_LIST_HEAD(&sdp->sd_log_le_rg);
INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
- mutex_init(&sdp->sd_log_reserve_mutex);
+ init_waitqueue_head(&sdp->sd_log_waitq);
+ init_waitqueue_head(&sdp->sd_logd_waitq);
INIT_LIST_HEAD(&sdp->sd_ail1_list);
INIT_LIST_HEAD(&sdp->sd_ail2_list);
@@ -487,7 +486,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
struct dentry *dentry;
struct inode *inode;
- inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
+ inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
if (IS_ERR(inode)) {
fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
return PTR_ERR(inode);
@@ -733,6 +732,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
if (sdp->sd_args.ar_spectator) {
sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
+ atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
+ atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
} else {
if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
fs_err(sdp, "can't mount journal #%u\n",
@@ -770,6 +771,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
goto fail_jinode_gh;
}
atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
+ atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
+ atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
/* Map the extents for this journal's blocks */
map_journal_extents(sdp);
@@ -951,8 +954,6 @@ static int init_threads(struct gfs2_sbd *sdp, int undo)
if (undo)
goto fail_quotad;
- sdp->sd_log_flush_time = jiffies;
-
p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
error = IS_ERR(p);
if (error) {
@@ -1160,7 +1161,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
GFS2_BASIC_BLOCK_SHIFT;
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
- sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
+ sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
if (sdp->sd_args.ar_statfs_quantum) {
sdp->sd_tune.gt_statfs_slow = 0;
@@ -1323,7 +1324,7 @@ static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
args.ar_data = GFS2_DATA_DEFAULT;
- args.ar_commit = 60;
+ args.ar_commit = 30;
args.ar_statfs_quantum = 30;
args.ar_quota_quantum = 60;
args.ar_errors = GFS2_ERRORS_DEFAULT;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6dbcbad..49667d6 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -637,15 +637,40 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
unsigned blocksize, iblock, pos;
struct buffer_head *bh, *dibh;
struct page *page;
- void *kaddr;
- struct gfs2_quota *qp;
- s64 value;
- int err = -EIO;
+ void *kaddr, *ptr;
+ struct gfs2_quota q, *qp;
+ int err, nbytes;
u64 size;
if (gfs2_is_stuffed(ip))
gfs2_unstuff_dinode(ip, NULL);
-
+
+ memset(&q, 0, sizeof(struct gfs2_quota));
+ err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
+ if (err < 0)
+ return err;
+
+ err = -EIO;
+ qp = &q;
+ qp->qu_value = be64_to_cpu(qp->qu_value);
+ qp->qu_value += change;
+ qp->qu_value = cpu_to_be64(qp->qu_value);
+ qd->qd_qb.qb_value = qp->qu_value;
+ if (fdq) {
+ if (fdq->d_fieldmask & FS_DQ_BSOFT) {
+ qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
+ qd->qd_qb.qb_warn = qp->qu_warn;
+ }
+ if (fdq->d_fieldmask & FS_DQ_BHARD) {
+ qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
+ qd->qd_qb.qb_limit = qp->qu_limit;
+ }
+ }
+
+ /* Write the quota into the quota file on disk */
+ ptr = qp;
+ nbytes = sizeof(struct gfs2_quota);
+get_a_page:
page = grab_cache_page(mapping, index);
if (!page)
return -ENOMEM;
@@ -667,7 +692,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
if (!buffer_mapped(bh)) {
gfs2_block_map(inode, iblock, bh, 1);
if (!buffer_mapped(bh))
- goto unlock;
+ goto unlock_out;
+ /* If it's a newly allocated disk block for quota, zero it */
+ if (buffer_new(bh)) {
+ memset(bh->b_data, 0, bh->b_size);
+ set_buffer_uptodate(bh);
+ }
}
if (PageUptodate(page))
@@ -677,32 +707,34 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
ll_rw_block(READ_META, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
- goto unlock;
+ goto unlock_out;
}
gfs2_trans_add_bh(ip->i_gl, bh, 0);
kaddr = kmap_atomic(page, KM_USER0);
- qp = kaddr + offset;
- value = (s64)be64_to_cpu(qp->qu_value) + change;
- qp->qu_value = cpu_to_be64(value);
- qd->qd_qb.qb_value = qp->qu_value;
- if (fdq) {
- if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
- qd->qd_qb.qb_warn = qp->qu_warn;
- }
- if (fdq->d_fieldmask & FS_DQ_BHARD) {
- qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
- qd->qd_qb.qb_limit = qp->qu_limit;
- }
- }
+ if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
+ nbytes = PAGE_CACHE_SIZE - offset;
+ memcpy(kaddr + offset, ptr, nbytes);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
+ unlock_page(page);
+ page_cache_release(page);
+ /* If quota straddles page boundary, we need to update the rest of the
+ * quota at the beginning of the next page */
+ if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
+ ptr = ptr + nbytes;
+ nbytes = sizeof(struct gfs2_quota) - nbytes;
+ offset = 0;
+ index++;
+ goto get_a_page;
+ }
+
+ /* Update the disk inode timestamp and size (if extended) */
err = gfs2_meta_inode_buffer(ip, &dibh);
if (err)
- goto unlock;
+ goto out;
size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size) {
@@ -715,7 +747,9 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
brelse(dibh);
mark_inode_dirty(inode);
-unlock:
+out:
+ return err;
+unlock_out:
unlock_page(page);
page_cache_release(page);
return err;
@@ -779,8 +813,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
* rgrp since it won't be allocated during the transaction
*/
al->al_requested = 1;
- /* +1 in the end for block requested above for unstuffing */
- blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
+ /* +3 at the end for the unstuffing block, the inode size update block
+ * and another block in case the quota straddles a page boundary and
+ * two blocks need to be updated instead of one */
+ blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
if (nalloc)
al->al_requested += nalloc * (data_blocks + ind_blocks);
@@ -1418,10 +1454,18 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
memset(fqs, 0, sizeof(struct fs_quota_stat));
fqs->qs_version = FS_QSTAT_VERSION;
- if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
- fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
- else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
- fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+
+ switch (sdp->sd_args.ar_quota) {
+ case GFS2_QUOTA_ON:
+ fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
+ /*FALLTHRU*/
+ case GFS2_QUOTA_ACCOUNT:
+ fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+ break;
+ case GFS2_QUOTA_OFF:
+ break;
+ }
+
if (sdp->sd_quota_inode) {
fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
@@ -1432,8 +1476,8 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
return 0;
}
-static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
- struct fs_disk_quota *fdq)
+static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_lvb *qlvb;
@@ -1477,8 +1521,8 @@ out:
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
-static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
- struct fs_disk_quota *fdq)
+static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1585,7 +1629,7 @@ out_put:
const struct quotactl_ops gfs2_quotactl_ops = {
.quota_sync = gfs2_quota_sync,
.get_xstate = gfs2_quota_get_xstate,
- .get_xquota = gfs2_xquota_get,
- .set_xquota = gfs2_xquota_set,
+ .get_dqblk = gfs2_get_dqblk,
+ .set_dqblk = gfs2_set_dqblk,
};
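The get_a_page loop above writes sizeof(struct gfs2_quota) bytes that may cross at most one page boundary: copy what fits in the current page, then restart at offset 0 of the next index. The generic shape (sketch; write_chunk() is a hypothetical stand-in for the page-cache I/O):

static void copy_across_pages(void *src, pgoff_t index,
			      unsigned offset, unsigned len)
{
	while (len) {
		unsigned chunk = min_t(unsigned, len,
				       PAGE_CACHE_SIZE - offset);

		write_chunk(index, offset, src, chunk);
		src += chunk;
		len -= chunk;
		offset = 0;
		index++;
	}
}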
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index bf011dc..117fa41 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -949,13 +949,13 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
* try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
* @rgd: The rgrp
*
- * Returns: The inode, if one has been found
+ * Returns: the block address of an unlinked inode to reclaim, or 0 if
+ * none was found
*/
-static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
- u64 skip)
+static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
+ u64 skip)
{
- struct inode *inode;
u32 goal = 0, block;
u64 no_addr;
struct gfs2_sbd *sdp = rgd->rd_sbd;
@@ -980,14 +980,11 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
if (no_addr == skip)
continue;
*last_unlinked = no_addr;
- inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
- no_addr, -1, 1);
- if (!IS_ERR(inode))
- return inode;
+ return no_addr;
}
rgd->rd_flags &= ~GFS2_RDF_CHECK;
- return NULL;
+ return 0;
}
/**
@@ -1068,11 +1065,12 @@ static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
* Try to acquire rgrp in way which avoids contending with others.
*
* Returns: errno
+ * unlinked: the block address of an unlinked block to be reclaimed
*/
-static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
+static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked,
+ u64 *last_unlinked)
{
- struct inode *inode = NULL;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd, *begin = NULL;
struct gfs2_alloc *al = ip->i_alloc;
@@ -1081,6 +1079,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
int loops = 0;
int error, rg_locked;
+ *unlinked = 0;
rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
while (rgd) {
@@ -1097,19 +1096,24 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
case 0:
if (try_rgrp_fit(rgd, al))
goto out;
- if (rgd->rd_flags & GFS2_RDF_CHECK)
- inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
+ /* If the rg came in already locked, there's no
+ way we can recover from a failed try_rgrp_unlink
+ because that would require an iput which can only
+ happen after the rgrp is unlocked. */
+ if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK)
+ *unlinked = try_rgrp_unlink(rgd, last_unlinked,
+ ip->i_no_addr);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
- if (inode)
- return inode;
+ if (*unlinked)
+ return -EAGAIN;
/* fall through */
case GLR_TRYFAILED:
rgd = recent_rgrp_next(rgd);
break;
default:
- return ERR_PTR(error);
+ return error;
}
}
@@ -1131,12 +1135,13 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
case 0:
if (try_rgrp_fit(rgd, al))
goto out;
- if (rgd->rd_flags & GFS2_RDF_CHECK)
- inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
+ if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK)
+ *unlinked = try_rgrp_unlink(rgd, last_unlinked,
+ ip->i_no_addr);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
- if (inode)
- return inode;
+ if (*unlinked)
+ return -EAGAIN;
break;
case GLR_TRYFAILED:
@@ -1144,7 +1149,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
break;
default:
- return ERR_PTR(error);
+ return error;
}
rgd = gfs2_rgrpd_get_next(rgd);
@@ -1153,7 +1158,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
if (rgd == begin) {
if (++loops >= 3)
- return ERR_PTR(-ENOSPC);
+ return -ENOSPC;
if (!skipped)
loops++;
flags = 0;
@@ -1173,7 +1178,7 @@ out:
forward_rgrp_set(sdp, rgd);
}
- return NULL;
+ return 0;
}
/**
@@ -1189,7 +1194,7 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
struct gfs2_alloc *al = ip->i_alloc;
struct inode *inode;
int error = 0;
- u64 last_unlinked = NO_BLOCK;
+ u64 last_unlinked = NO_BLOCK, unlinked;
if (gfs2_assert_warn(sdp, al->al_requested))
return -EINVAL;
@@ -1205,14 +1210,19 @@ try_again:
if (error)
return error;
- inode = get_local_rgrp(ip, &last_unlinked);
- if (inode) {
+ error = get_local_rgrp(ip, &unlinked, &last_unlinked);
+ if (error) {
if (ip != GFS2_I(sdp->sd_rindex))
gfs2_glock_dq_uninit(&al->al_ri_gh);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
- iput(inode);
+ if (error != -EAGAIN)
+ return error;
+ error = gfs2_unlinked_inode_lookup(ip->i_inode.i_sb,
+ unlinked, &inode);
+ if (inode)
+ iput(inode);
gfs2_log_flush(sdp, NULL);
+ if (error == GLR_TRYFAILED)
+ error = 0;
goto try_again;
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 50aac60..4d1aad3 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1113,7 +1113,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
int error;
spin_lock(&gt->gt_spin);
- args.ar_commit = gt->gt_log_flush_secs;
+ args.ar_commit = gt->gt_logd_secs;
args.ar_quota_quantum = gt->gt_quota_quantum;
if (gt->gt_statfs_slow)
args.ar_statfs_quantum = 0;
@@ -1160,7 +1160,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
else
clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
spin_lock(&gt->gt_spin);
- gt->gt_log_flush_secs = args.ar_commit;
+ gt->gt_logd_secs = args.ar_commit;
gt->gt_quota_quantum = args.ar_quota_quantum;
if (args.ar_statfs_quantum) {
gt->gt_statfs_slow = 0;
@@ -1305,8 +1305,8 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
}
if (args->ar_discard)
seq_printf(s, ",discard");
- val = sdp->sd_tune.gt_log_flush_secs;
- if (val != 60)
+ val = sdp->sd_tune.gt_logd_secs;
+ if (val != 30)
seq_printf(s, ",commit=%d", val);
val = sdp->sd_tune.gt_statfs_quantum;
if (val != 30)
@@ -1334,7 +1334,8 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
}
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
seq_printf(s, ",nobarrier");
-
+ if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
+ seq_printf(s, ",demote_interface_used");
return 0;
}
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 54fd984..37f5393 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -232,6 +232,8 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
glops = gfs2_glops_list[gltype];
if (glops == NULL)
return -EINVAL;
+ if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
+ fs_info(sdp, "demote interface used\n");
rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
if (rv)
return rv;
@@ -468,8 +470,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
} \
TUNE_ATTR_2(name, name##_store)
-TUNE_ATTR(incore_log_blocks, 0);
-TUNE_ATTR(log_flush_secs, 0);
TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
@@ -481,8 +481,6 @@ TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
static struct attribute *tune_attrs[] = {
- &tune_attr_incore_log_blocks.attr,
- &tune_attr_log_flush_secs.attr,
&tune_attr_quota_warn_period.attr,
&tune_attr_quota_quantum.attr,
&tune_attr_max_readahead.attr,
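The demote_rq_store() hunk above uses test_and_set_bit() to print its "demote interface used" notice exactly once: the bit flip is atomic, so even racing writers to the sysfs file produce a single message, and gfs2_show_options() can later report the same SDF_DEMOTE flag as a mount option. A generic sketch of the one-shot-message idiom (the flag word and bit number here are invented):

#include <linux/bitops.h>
#include <linux/kernel.h>

#define MY_IFACE_USED	0		/* hypothetical bit number */
static unsigned long my_flags;

static void note_iface_used(void)
{
	/* test_and_set_bit() returns the old value atomically,
	 * so only the very first caller sees 0 and prints */
	if (!test_and_set_bit(MY_IFACE_USED, &my_flags))
		printk(KERN_INFO "interface used for the first time\n");
}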
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 4ef0e9f..9ec73a8 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -23,6 +23,7 @@
#include "meta_io.h"
#include "trans.h"
#include "util.h"
+#include "trace_gfs2.h"
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
unsigned int revokes)
@@ -75,6 +76,23 @@ fail_holder_uninit:
return error;
}
+/**
+ * gfs2_log_release - Release a given number of log blocks
+ * @sdp: The GFS2 superblock
+ * @blks: The number of blocks
+ *
+ */
+
+static void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
+{
+
+ atomic_add(blks, &sdp->sd_log_blks_free);
+ trace_gfs2_log_blocks(sdp, blks);
+ gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
+ sdp->sd_jdesc->jd_blocks);
+ up_read(&sdp->sd_log_flush_lock);
+}
+
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
struct gfs2_trans *tr = current->journal_info;
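gfs2_log_release() above returns journal blocks to the free pool, traces the new count, asserts the count never exceeds the journal size, and drops the shared flush lock taken at reservation time. A plausible caller, not shown in this hunk, is the transaction-end path handing back an untouched reservation; the sketch below assumes the tr_touched/tr_reserved fields of struct gfs2_trans and the GFS2 private headers:

#include <linux/slab.h>
/* plus the gfs2-internal headers for struct gfs2_sbd/gfs2_trans */

/* Hedged sketch: the exact call site is an assumption; only
 * gfs2_log_release() itself is defined by the hunk above. */
static void my_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;

	current->journal_info = NULL;
	if (!tr->tr_touched) {			/* nothing was journaled */
		gfs2_log_release(sdp, tr->tr_reserved);
		kfree(tr);
		return;
	}
	/* ... otherwise commit the transaction as usual ... */
}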
diff --git a/fs/inode.c b/fs/inode.c
index 407bf39..258ec22 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1205,8 +1205,6 @@ void generic_delete_inode(struct inode *inode)
inodes_stat.nr_inodes--;
spin_unlock(&inode_lock);
- security_inode_delete(inode);
-
if (op->delete_inode) {
void (*delete)(struct inode *) = op->delete_inode;
/* Filesystems implementing their own
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index ecb44c9..28a9dda 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -786,6 +786,12 @@ wait_for_iobuf:
jbd_debug(3, "JBD: commit phase 6\n");
+ /* All metadata is written, now write commit record and do cleanup */
+ spin_lock(&journal->j_state_lock);
+ J_ASSERT(commit_transaction->t_state == T_COMMIT);
+ commit_transaction->t_state = T_COMMIT_RECORD;
+ spin_unlock(&journal->j_state_lock);
+
if (journal_write_commit_record(journal, commit_transaction))
err = -EIO;
@@ -923,7 +929,7 @@ restart_loop:
jbd_debug(3, "JBD: commit phase 8\n");
- J_ASSERT(commit_transaction->t_state == T_COMMIT);
+ J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
commit_transaction->t_state = T_FINISHED;
J_ASSERT(commit_transaction == journal->j_committing_transaction);
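The two hunks above insert a new T_COMMIT_RECORD state between "writing metadata" and "finished", so that code elsewhere (see the journal.c hunk below) can ask whether the commit block itself has reached disk yet. The numeric ordering matters because the state is tested with >=; an abridged sketch of the ladder, with only the last three positions actually asserted by these hunks:

/* Abridged; the earlier states follow the existing jbd convention. */
enum {
	T_RUNNING,		/* accepting new handles */
	T_LOCKED,		/* no new handles, draining updates */
	T_FLUSH,		/* writing data buffers */
	T_COMMIT,		/* writing metadata */
	T_COMMIT_RECORD,	/* metadata done, writing commit block */
	T_FINISHED,		/* commit block on disk */
};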
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index bd224ee..93d1e47 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -565,6 +565,38 @@ int log_wait_commit(journal_t *journal, tid_t tid)
}
/*
+ * Return 1 if a given transaction has not yet sent a barrier request
+ * connected with a transaction commit. If 0 is returned, the
+ * transaction may or may not have sent the barrier. Used to avoid
+ * sending the barrier twice in common cases.
+ */
+int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
+{
+ int ret = 0;
+ transaction_t *commit_trans;
+
+ if (!(journal->j_flags & JFS_BARRIER))
+ return 0;
+ spin_lock(&journal->j_state_lock);
+ /* Transaction already committed? */
+ if (tid_geq(journal->j_commit_sequence, tid))
+ goto out;
+ /*
+ * Transaction is being committed and we already proceeded to
+ * writing commit record?
+ */
+ commit_trans = journal->j_committing_transaction;
+ if (commit_trans && commit_trans->t_tid == tid &&
+ commit_trans->t_state >= T_COMMIT_RECORD)
+ goto out;
+ ret = 1;
+out:
+ spin_unlock(&journal->j_state_lock);
+ return ret;
+}
+EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
+
+/*
* Log buffer allocation routines:
*/
@@ -1157,6 +1189,7 @@ int journal_destroy(journal_t *journal)
{
int err = 0;
+
/* Wait for the commit thread to wake up and die. */
journal_kill_thread(journal);
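journal_trans_will_send_data_barrier() lets a caller skip issuing its own block-layer barrier when the commit it is about to wait for will still send one. A hedged usage sketch of an fsync-style path; blkdev_issue_flush() is the real block-layer helper, but its signature varied across 2.6.3x kernels, so treat that call as pseudocode:

#include <linux/blkdev.h>
#include <linux/jbd.h>

static int my_fsync(journal_t *journal, struct block_device *bdev, tid_t tid)
{
	/* 1 == the commit will still send the barrier, so we need not */
	int needs_barrier = !journal_trans_will_send_data_barrier(journal, tid);
	int ret = log_wait_commit(journal, tid);

	if (needs_barrier)
		blkdev_issue_flush(bdev, NULL);	/* assumed 2-arg form */
	return ret;
}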
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c03d4dc..bc2ff59 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1889,7 +1889,7 @@ static struct kmem_cache *get_slab(size_t size)
BUG_ON(i >= JBD2_MAX_SLABS);
if (unlikely(i < 0))
i = 0;
- BUG_ON(jbd2_slab[i] == 0);
+ BUG_ON(jbd2_slab[i] == NULL);
return jbd2_slab[i];
}
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 3ff50da..55f1dde 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -23,10 +23,9 @@ static int jffs2_garbage_collect_thread(void *);
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
- spin_lock(&c->erase_completion_lock);
+ assert_spin_locked(&c->erase_completion_lock);
if (c->gc_task && jffs2_thread_should_wake(c))
send_sig(SIGHUP, c->gc_task, 1);
- spin_unlock(&c->erase_completion_lock);
}
/* This must only ever be called when no GC thread is currently running */
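The background.c hunk changes the locking contract of jffs2_garbage_collect_trigger(): instead of taking erase_completion_lock itself, it now requires that the caller already holds it (every call site touched later in this patch wraps the call in the lock). assert_spin_locked() turns a violated contract into a loud splat with CONFIG_DEBUG_SPINLOCK rather than silent list corruption. A generic sketch of the idiom:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock);

static void needs_lock_held(void)
{
	assert_spin_locked(&lock);	/* splat if the caller forgot */
	/* ... work that must run inside the caller's critical section ... */
}

static void caller(void)
{
	spin_lock(&lock);
	needs_lock_held();
	spin_unlock(&lock);
}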
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index b47679b..6286ad9 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -103,9 +103,10 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
jffs2_erase_failed(c, jeb, bad_offset);
}
-void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
+int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
struct jffs2_eraseblock *jeb;
+ int work_done = 0;
mutex_lock(&c->erase_free_sem);
@@ -121,6 +122,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
mutex_unlock(&c->erase_free_sem);
jffs2_mark_erased_block(c, jeb);
+ work_done++;
if (!--count) {
D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
goto done;
@@ -157,6 +159,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
mutex_unlock(&c->erase_free_sem);
done:
D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
+ return work_done;
}
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
@@ -165,10 +168,11 @@ static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblo
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
+ /* Wake the GC thread to mark them clean */
+ jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
- /* Ensure that kupdated calls us again to mark them clean */
- jffs2_erase_pending_trigger(c);
+ wake_up(&c->erase_wait);
}
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
@@ -487,9 +491,9 @@ filebad:
refile:
/* Stick it back on the list from whence it came and come back later */
- jffs2_erase_pending_trigger(c);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
+ jffs2_garbage_collect_trigger(c);
list_move(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 3451a81..86e0821 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -313,8 +313,8 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
case S_IFBLK:
case S_IFCHR:
/* Read the device numbers from the media */
- if (f->metadata->size != sizeof(jdev.old) &&
- f->metadata->size != sizeof(jdev.new)) {
+ if (f->metadata->size != sizeof(jdev.old_id) &&
+ f->metadata->size != sizeof(jdev.new_id)) {
printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
goto error_io;
}
@@ -325,10 +325,10 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
goto error;
}
- if (f->metadata->size == sizeof(jdev.old))
- rdev = old_decode_dev(je16_to_cpu(jdev.old));
+ if (f->metadata->size == sizeof(jdev.old_id))
+ rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
else
- rdev = new_decode_dev(je32_to_cpu(jdev.new));
+ rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
case S_IFSOCK:
case S_IFIFO:
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 3b6f2fa..f5e96bd 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -214,6 +214,19 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
return ret;
}
+ /* If there are any blocks which need erasing, erase them now */
+ if (!list_empty(&c->erase_complete_list) ||
+ !list_empty(&c->erase_pending_list)) {
+ spin_unlock(&c->erase_completion_lock);
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n"));
+ if (jffs2_erase_pending_blocks(c, 1)) {
+ mutex_unlock(&c->alloc_sem);
+ return 0;
+ }
+ D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
+ spin_lock(&c->erase_completion_lock);
+ }
+
/* First, work out which block we're garbage-collecting */
jeb = c->gcblock;
@@ -222,7 +235,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
if (!jeb) {
/* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
- if (!list_empty(&c->erase_pending_list)) {
+ if (c->nr_erasing_blocks) {
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -EAGAIN;
@@ -435,7 +448,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
list_add_tail(&c->gcblock->list, &c->erase_pending_list);
c->gcblock = NULL;
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
}
spin_unlock(&c->erase_completion_lock);
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 507ed6e..a881a42 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -312,11 +312,11 @@ static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c)
static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev)
{
if (old_valid_dev(rdev)) {
- jdev->old = cpu_to_je16(old_encode_dev(rdev));
- return sizeof(jdev->old);
+ jdev->old_id = cpu_to_je16(old_encode_dev(rdev));
+ return sizeof(jdev->old_id);
} else {
- jdev->new = cpu_to_je32(new_encode_dev(rdev));
- return sizeof(jdev->new);
+ jdev->new_id = cpu_to_je32(new_encode_dev(rdev));
+ return sizeof(jdev->new_id);
}
}
@@ -464,7 +464,7 @@ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
int jffs2_do_mount_fs(struct jffs2_sb_info *c);
/* erase.c */
-void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
+int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 191359d..694aa5b 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -116,9 +116,21 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
ret = jffs2_garbage_collect_pass(c);
- if (ret == -EAGAIN)
- jffs2_erase_pending_blocks(c, 1);
- else if (ret)
+ if (ret == -EAGAIN) {
+ spin_lock(&c->erase_completion_lock);
+ if (c->nr_erasing_blocks &&
+ list_empty(&c->erase_pending_list) &&
+ list_empty(&c->erase_complete_list)) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&c->erase_wait, &wait);
+ D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__));
+ spin_unlock(&c->erase_completion_lock);
+
+ schedule();
+ } else
+ spin_unlock(&c->erase_completion_lock);
+ } else if (ret)
return ret;
cond_resched();
@@ -217,7 +229,7 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
list_move_tail(&ejeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
ejeb->offset));
}
@@ -469,7 +481,9 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
+ spin_lock(&c->erase_completion_lock);
jffs2_garbage_collect_trigger(c);
+ spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
}
@@ -611,7 +625,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
@@ -732,6 +746,10 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
int nr_very_dirty = 0;
struct jffs2_eraseblock *jeb;
+ if (!list_empty(&c->erase_complete_list) ||
+ !list_empty(&c->erase_pending_list))
+ return 1;
+
if (c->unchecked_size) {
D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
c->unchecked_size, c->checked_ino));
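The jffs2_reserve_space() hunk above open-codes a sleep on c->erase_wait: the condition is checked under erase_completion_lock, the task is queued and marked uninterruptible, and the lock is dropped before schedule() so the eraser can run and wake it via the wake_up() added in erase.c. A generic sketch of the pattern; note the matching remove_wait_queue() step, which falls outside the context shown in the hunk:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static DEFINE_SPINLOCK(lock);

static void sleep_until_woken(void)
{
	DECLARE_WAITQUEUE(wait, current);

	spin_lock(&lock);
	/* ... check the wait condition under the lock ... */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&wq, &wait);
	spin_unlock(&lock);		/* let the waker make progress */
	schedule();			/* woken by wake_up(&wq) */
	remove_wait_queue(&wq, &wait);
}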
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index a7f03b7..035a767 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -140,8 +140,7 @@ void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
#endif /* WRITEBUFFER */
-/* erase.c */
-static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
+static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c)
{
OFNI_BS_2SFFJ(c)->s_dirt = 1;
}
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 696686c..46f870d 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -260,7 +260,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
ret = -EIO;
goto out;
}
- jffs2_erase_pending_trigger(c);
+ spin_lock(&c->erase_completion_lock);
+ jffs2_garbage_collect_trigger(c);
+ spin_unlock(&c->erase_completion_lock);
}
ret = 0;
out:
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 9a80e8e..511e2d6 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -63,8 +63,6 @@ static void jffs2_write_super(struct super_block *sb)
if (!(sb->s_flags & MS_RDONLY)) {
D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
- jffs2_garbage_collect_trigger(c);
- jffs2_erase_pending_blocks(c, 0);
jffs2_flush_wbuf_gc(c, 0);
}
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 5ef7bac..07ee154 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -84,7 +84,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
struct jffs2_inodirty *new;
/* Mark the superblock dirty so that kupdated will flush... */
- jffs2_erase_pending_trigger(c);
+ jffs2_dirty_trigger(c);
if (jffs2_wbuf_pending_for_ino(c, ino))
return;
@@ -121,7 +121,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
@@ -152,7 +152,7 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
}
if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
@@ -543,7 +543,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
list_move(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
}
jffs2_dbg_acct_sanity_check_nolock(c, jeb);
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 14ba982..85d9ec6 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -98,7 +98,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (rc)
return rc;
- if (iattr->ia_valid & ATTR_SIZE)
+ if (is_quota_modification(inode, iattr))
dquot_initialize(inode);
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 9e2f6a7..c92ea3b 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -2438,7 +2438,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
/* check if this is a control page update for an allocation.
* if so, update the leaf to reflect the new leaf value using
- * dbSplit(); otherwise (deallocation), use dbJoin() to udpate
+ * dbSplit(); otherwise (deallocation), use dbJoin() to update
* the leaf with the new value. in addition to updating the
* leaf, dbSplit() will also split the binary buddy system of
* the leaves, if required, and bubble new values within the
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 157382f..b66832a 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -446,10 +446,8 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
/* initialize the mount flag and determine the default error handler */
flag = JFS_ERR_REMOUNT_RO;
- if (!parse_options((char *) data, sb, &newLVSize, &flag)) {
- kfree(sbi);
- return -EINVAL;
- }
+ if (!parse_options((char *) data, sb, &newLVSize, &flag))
+ goto out_kfree;
sbi->flag = flag;
#ifdef CONFIG_JFS_POSIX_ACL
@@ -458,7 +456,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
if (newLVSize) {
printk(KERN_ERR "resize option for remount only\n");
- return -EINVAL;
+ goto out_kfree;
}
/*
@@ -478,7 +476,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
inode = new_inode(sb);
if (inode == NULL) {
ret = -ENOMEM;
- goto out_kfree;
+ goto out_unload;
}
inode->i_ino = 0;
inode->i_nlink = 1;
@@ -550,9 +548,10 @@ out_mount_failed:
make_bad_inode(sbi->direct_inode);
iput(sbi->direct_inode);
sbi->direct_inode = NULL;
-out_kfree:
+out_unload:
if (sbi->nls_tab)
unload_nls(sbi->nls_tab);
+out_kfree:
kfree(sbi);
return ret;
}
diff --git a/fs/libfs.c b/fs/libfs.c
index ea9a6cc..232bea4 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -547,6 +547,40 @@ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
}
/**
+ * simple_write_to_buffer - copy data from user space to the buffer
+ * @to: the buffer to write to
+ * @available: the size of the buffer
+ * @ppos: the current position in the buffer
+ * @from: the user space buffer to read from
+ * @count: the maximum number of bytes to read
+ *
+ * The simple_write_to_buffer() function reads up to @count bytes from the user
+ * space address starting at @from into the buffer @to at offset @ppos.
+ *
+ * On success, the number of bytes written is returned and the offset @ppos is
+ * advanced by this number, or a negative value is returned on error.
+ **/
+ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count)
+{
+ loff_t pos = *ppos;
+ size_t res;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available || !count)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ res = copy_from_user(to + pos, from, count);
+ if (res == count)
+ return -EFAULT;
+ count -= res;
+ *ppos = pos + count;
+ return count;
+}
+
+/**
* memory_read_from_buffer - copy data from the buffer
* @to: the kernel space buffer to read to
* @count: the maximum number of bytes to read
@@ -864,6 +898,7 @@ EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_read_from_buffer);
+EXPORT_SYMBOL(simple_write_to_buffer);
EXPORT_SYMBOL(memory_read_from_buffer);
EXPORT_SYMBOL(simple_transaction_set);
EXPORT_SYMBOL(simple_transaction_get);
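simple_write_to_buffer() is the write-side twin of simple_read_from_buffer(): it bounds the copy to the buffer, handles partial copy_from_user() faults, and advances *ppos. A hedged usage sketch in the shape of a debugfs-style write handler; everything except simple_write_to_buffer() itself (the buffer, its size, the handler name) is invented:

#include <linux/fs.h>

static char state_buf[64];

static ssize_t state_write(struct file *file, const char __user *ubuf,
			   size_t count, loff_t *ppos)
{
	/* keep one byte spare so the buffer stays NUL-terminated */
	return simple_write_to_buffer(state_buf, sizeof(state_buf) - 1,
				      ppos, ubuf, count);
}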
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 243c000..9bd2ce2 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -303,6 +303,11 @@ static void bdev_put_device(struct super_block *sb)
close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}
+static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
+{
+ return 0;
+}
+
static const struct logfs_device_ops bd_devops = {
.find_first_sb = bdev_find_first_sb,
.find_last_sb = bdev_find_last_sb,
@@ -310,6 +315,7 @@ static const struct logfs_device_ops bd_devops = {
.readpage = bdev_readpage,
.writeseg = bdev_writeseg,
.erase = bdev_erase,
+ .can_write_buf = bdev_can_write_buf,
.sync = bdev_sync,
.put_device = bdev_put_device,
};
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index cafb6ef..a85d47d 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -9,6 +9,7 @@
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
@@ -126,7 +127,8 @@ static int mtd_readpage(void *_sb, struct page *page)
err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
page_address(page));
- if (err == -EUCLEAN) {
+ if (err == -EUCLEAN || err == -EBADMSG) {
+ /* -EBADMSG happens regularly on power failures */
err = 0;
/* FIXME: force GC this segment */
}
@@ -233,12 +235,32 @@ static void mtd_put_device(struct super_block *sb)
put_mtd_device(logfs_super(sb)->s_mtd);
}
+static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ void *buf;
+ int err;
+
+ buf = kmalloc(super->s_writesize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = mtd_read(sb, ofs, super->s_writesize, buf);
+ if (err)
+ goto out;
+ if (memchr_inv(buf, 0xff, super->s_writesize))
+ err = -EIO;
+ kfree(buf);
+out:
+ return err;
+}
+
static const struct logfs_device_ops mtd_devops = {
.find_first_sb = mtd_find_first_sb,
.find_last_sb = mtd_find_last_sb,
.readpage = mtd_readpage,
.writeseg = mtd_writeseg,
.erase = mtd_erase,
+ .can_write_buf = mtd_can_write_buf,
.sync = mtd_sync,
.put_device = mtd_put_device,
};
@@ -250,5 +272,7 @@ int logfs_get_sb_mtd(struct file_system_type *type, int flags,
const struct logfs_device_ops *devops = &mtd_devops;
mtd = get_mtd_device(NULL, mtdnr);
+ if (IS_ERR(mtd))
+ return PTR_ERR(mtd);
return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt);
}
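mtd_can_write_buf() decides whether the write buffer may be replayed at offset ofs by reading the region back and checking that it is still erased: flash erases to all 0xff, so any other byte means the area was already programmed. memchr_inv() (a logfs-local helper at this point, later promoted to lib/string.c) returns NULL when every byte matches. The check reduces to:

/* Sketch of the erased-region test; memchr_inv() is assumed to be
 * available in this tree as a logfs helper. */
static int region_is_erased(const void *buf, size_t len)
{
	return memchr_inv(buf, 0xff, len) == NULL;	/* all bytes 0xff */
}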
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 2396a85..72d1893 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -12,7 +12,7 @@
* Atomic dir operations
*
* Directory operations are by default not atomic. Dentries and Inodes are
- * created/removed/altered in seperate operations. Therefore we need to do
+ * created/removed/altered in separate operations. Therefore we need to do
* a small amount of journaling.
*
* Create, link, mkdir, mknod and symlink all share the same function to do
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 370f367..0de5240 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -161,7 +161,17 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
static void logfs_invalidatepage(struct page *page, unsigned long offset)
{
- move_page_to_btree(page);
+ struct logfs_block *block = logfs_block(page);
+
+ if (block->reserved_bytes) {
+ struct super_block *sb = page->mapping->host->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+
+ super->s_dirty_pages -= block->reserved_bytes;
+ block->ops->free_block(sb, block);
+ BUG_ON(bitmap_weight(block->alias_map, LOGFS_BLOCK_FACTOR));
+ } else
+ move_page_to_btree(page);
BUG_ON(PagePrivate(page) || page->private);
}
@@ -212,10 +222,8 @@ int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
int logfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
struct super_block *sb = dentry->d_inode->i_sb;
- struct logfs_super *super = logfs_super(sb);
- /* FIXME: write anchor */
- super->s_devops->sync(sb);
+ logfs_write_anchor(sb);
return 0;
}
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 76c242f..caa4419 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -122,7 +122,7 @@ static void logfs_cleanse_block(struct super_block *sb, u64 ofs, u64 ino,
logfs_safe_iput(inode, cookie);
}
-static u32 logfs_gc_segment(struct super_block *sb, u32 segno, u8 dist)
+static u32 logfs_gc_segment(struct super_block *sb, u32 segno)
{
struct logfs_super *super = logfs_super(sb);
struct logfs_segment_header sh;
@@ -401,7 +401,7 @@ static int __logfs_gc_once(struct super_block *sb, struct gc_candidate *cand)
segno, (u64)segno << super->s_segshift,
dist, no_free_segments(sb), valid,
super->s_free_bytes);
- cleaned = logfs_gc_segment(sb, segno, dist);
+ cleaned = logfs_gc_segment(sb, segno);
log_gc("GC segment #%02x complete - now %x valid\n", segno,
valid - cleaned);
BUG_ON(cleaned != valid);
@@ -632,38 +632,31 @@ static int check_area(struct super_block *sb, int i)
{
struct logfs_super *super = logfs_super(sb);
struct logfs_area *area = super->s_area[i];
- struct logfs_object_header oh;
+ gc_level_t gc_level;
+ u32 cleaned, valid, ec;
u32 segno = area->a_segno;
- u32 ofs = area->a_used_bytes;
- __be32 crc;
- int err;
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
if (!area->a_is_open)
return 0;
- for (ofs = area->a_used_bytes;
- ofs <= super->s_segsize - sizeof(oh);
- ofs += (u32)be16_to_cpu(oh.len) + sizeof(oh)) {
- err = wbuf_read(sb, dev_ofs(sb, segno, ofs), sizeof(oh), &oh);
- if (err)
- return err;
-
- if (!memchr_inv(&oh, 0xff, sizeof(oh)))
- break;
+ if (super->s_devops->can_write_buf(sb, ofs) == 0)
+ return 0;
- crc = logfs_crc32(&oh, sizeof(oh) - 4, 4);
- if (crc != oh.crc) {
- printk(KERN_INFO "interrupted header at %llx\n",
- dev_ofs(sb, segno, ofs));
- return 0;
- }
- }
- if (ofs != area->a_used_bytes) {
- printk(KERN_INFO "%x bytes unaccounted data found at %llx\n",
- ofs - area->a_used_bytes,
- dev_ofs(sb, segno, area->a_used_bytes));
- area->a_used_bytes = ofs;
- }
+ printk(KERN_INFO"LogFS: Possibly incomplete write at %llx\n", ofs);
+ /*
+ * The device cannot write back the write buffer. Most likely the
+ * wbuf was already written out and the system crashed at some point
+ * before the journal commit happened. In that case we wouldn't have
+ * to do anything. But if the crash happened before the wbuf was
+ * written out correctly, we must GC this segment. So assume the
+ * worst and always do the GC run.
+ */
+ area->a_is_open = 0;
+ valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
+ cleaned = logfs_gc_segment(sb, segno);
+ if (cleaned != valid)
+ return -EIO;
return 0;
}
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index 14ed272..755a92e 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -193,6 +193,7 @@ static void logfs_init_inode(struct super_block *sb, struct inode *inode)
inode->i_ctime = CURRENT_TIME;
inode->i_mtime = CURRENT_TIME;
inode->i_nlink = 1;
+ li->li_refcount = 1;
INIT_LIST_HEAD(&li->li_freeing_list);
for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
@@ -326,7 +327,7 @@ static void logfs_set_ino_generation(struct super_block *sb,
u64 ino;
mutex_lock(&super->s_journal_mutex);
- ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino);
+ ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino + 1);
super->s_last_ino = ino;
super->s_inos_till_wrap--;
if (super->s_inos_till_wrap < 0) {
@@ -386,8 +387,7 @@ static void logfs_init_once(void *_li)
static int logfs_sync_fs(struct super_block *sb, int wait)
{
- /* FIXME: write anchor */
- logfs_super(sb)->s_devops->sync(sb);
+ logfs_write_anchor(sb);
return 0;
}
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index fb0a613..4b0e061 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -132,10 +132,9 @@ static int read_area(struct super_block *sb, struct logfs_je_area *a)
ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
if (super->s_writesize > 1)
- logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
+ return logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
else
- logfs_buf_recover(area, ofs, NULL, 0);
- return 0;
+ return logfs_buf_recover(area, ofs, NULL, 0);
}
static void *unpack(void *from, void *to)
@@ -245,7 +244,7 @@ static int read_je(struct super_block *sb, u64 ofs)
read_erasecount(sb, unpack(jh, scratch));
break;
case JE_AREA:
- read_area(sb, unpack(jh, scratch));
+ err = read_area(sb, unpack(jh, scratch));
break;
case JE_OBJ_ALIAS:
err = logfs_load_object_aliases(sb, unpack(jh, scratch),
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 0a3df1a..1a9db84 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -144,6 +144,7 @@ struct logfs_area_ops {
* @erase: erase one segment
* @read: read from the device
* @erase: erase part of the device
+ * @can_write_buf: decide whether wbuf can be written to ofs
*/
struct logfs_device_ops {
struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs);
@@ -153,6 +154,7 @@ struct logfs_device_ops {
void (*writeseg)(struct super_block *sb, u64 ofs, size_t len);
int (*erase)(struct super_block *sb, loff_t ofs, size_t len,
int ensure_write);
+ int (*can_write_buf)(struct super_block *sb, u64 ofs);
void (*sync)(struct super_block *sb);
void (*put_device)(struct super_block *sb);
};
@@ -394,6 +396,7 @@ struct logfs_super {
int s_lock_count;
mempool_t *s_block_pool; /* struct logfs_block pool */
mempool_t *s_shadow_pool; /* struct logfs_shadow pool */
+ struct list_head s_writeback_list; /* writeback pages */
/*
* Space accounting:
* - s_used_bytes specifies space used to store valid data objects.
@@ -598,19 +601,19 @@ void freeseg(struct super_block *sb, u32 segno);
int logfs_init_areas(struct super_block *sb);
void logfs_cleanup_areas(struct super_block *sb);
int logfs_open_area(struct logfs_area *area, size_t bytes);
-void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
int use_filler);
-static inline void logfs_buf_write(struct logfs_area *area, u64 ofs,
+static inline int logfs_buf_write(struct logfs_area *area, u64 ofs,
void *buf, size_t len)
{
- __logfs_buf_write(area, ofs, buf, len, 0);
+ return __logfs_buf_write(area, ofs, buf, len, 0);
}
-static inline void logfs_buf_recover(struct logfs_area *area, u64 ofs,
+static inline int logfs_buf_recover(struct logfs_area *area, u64 ofs,
void *buf, size_t len)
{
- __logfs_buf_write(area, ofs, buf, len, 1);
+ return __logfs_buf_write(area, ofs, buf, len, 1);
}
/* super.c */
@@ -704,7 +707,7 @@ static inline gc_level_t expand_level(u64 ino, level_t __level)
u8 level = (__force u8)__level;
if (ino == LOGFS_INO_MASTER) {
- /* ifile has seperate areas */
+ /* ifile has separate areas */
level += LOGFS_MAX_LEVELS;
}
return (__force gc_level_t)level;
diff --git a/fs/logfs/logfs_abi.h b/fs/logfs/logfs_abi.h
index f674725..ae96051 100644
--- a/fs/logfs/logfs_abi.h
+++ b/fs/logfs/logfs_abi.h
@@ -50,9 +50,9 @@ static inline void check_##type(void) \
* 12 - gc recycled blocks, long-lived data
* 13 - replacement blocks, short-lived data
*
- * Levels 1-11 are necessary for robust gc operations and help seperate
+ * Levels 1-11 are necessary for robust gc operations and help separate
* short-lived metadata from longer-lived file data. In the future,
- * file data should get seperated into several segments based on simple
+ * file data should get separated into several segments based on simple
* heuristics. Old data recycled during gc operation is expected to be
* long-lived. New data is of uncertain life expectancy. New data
* used to replace older blocks in existing files is expected to be
@@ -117,7 +117,7 @@ static inline void check_##type(void) \
#define pure_ofs(ofs) (ofs & ~LOGFS_FULLY_POPULATED)
/*
- * LogFS needs to seperate data into levels. Each level is defined as the
+ * LogFS needs to separate data into levels. Each level is defined as the
* maximal possible distance from the master inode (inode of the inode file).
* Data blocks reside on level 0, 1x indirect block on level 1, etc.
* Inodes reside on level 6, indirect blocks for the inode file on levels 7-11.
@@ -204,7 +204,7 @@ SIZE_CHECK(logfs_segment_header, LOGFS_SEGMENT_HEADERSIZE);
* @ds_crc: crc32 of structure starting with the next field
* @ds_ifile_levels: maximum number of levels for ifile
* @ds_iblock_levels: maximum number of levels for regular files
- * @ds_data_levels: number of seperate levels for data
+ * @ds_data_levels: number of separate levels for data
* @pad0: reserved, must be 0
* @ds_feature_incompat: incompatible filesystem features
* @ds_feature_ro_compat: read-only compatible filesystem features
@@ -456,7 +456,7 @@ enum logfs_vim {
* @vim: life expectancy of data
*
* "Areas" are segments currently being used for writing. There is at least
- * one area per GC level. Several may be used to seperate long-living from
+ * one area per GC level. Several may be used to separate long-living from
* short-living data. If an area with unknown vim is encountered, it can
* simply be closed.
* The write buffer immediately follows this header.
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 3159db6..0718d11 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -892,6 +892,8 @@ u64 logfs_seek_hole(struct inode *inode, u64 bix)
return bix;
else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED)
bix = maxbix(li->li_height);
+ else if (bix >= maxbix(li->li_height))
+ return bix;
else {
bix = seek_holedata_loop(inode, bix, 0);
if (bix < maxbix(li->li_height))
@@ -1093,17 +1095,25 @@ static int logfs_reserve_bytes(struct inode *inode, int bytes)
int get_page_reserve(struct inode *inode, struct page *page)
{
struct logfs_super *super = logfs_super(inode->i_sb);
+ struct logfs_block *block = logfs_block(page);
int ret;
- if (logfs_block(page) && logfs_block(page)->reserved_bytes)
+ if (block && block->reserved_bytes)
return 0;
logfs_get_wblocks(inode->i_sb, page, WF_LOCK);
- ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE);
+ while ((ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE)) &&
+ !list_empty(&super->s_writeback_list)) {
+ block = list_entry(super->s_writeback_list.next,
+ struct logfs_block, alias_list);
+ block->ops->write_block(block);
+ }
if (!ret) {
alloc_data_block(inode, page);
- logfs_block(page)->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
+ block = logfs_block(page);
+ block->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE;
+ list_move_tail(&block->alias_list, &super->s_writeback_list);
}
logfs_put_wblocks(inode->i_sb, page, WF_LOCK);
return ret;
@@ -1861,7 +1871,7 @@ int logfs_truncate(struct inode *inode, u64 target)
size = target;
logfs_get_wblocks(sb, NULL, 1);
- err = __logfs_truncate(inode, target);
+ err = __logfs_truncate(inode, size);
if (!err)
err = __logfs_write_inode(inode, 0);
logfs_put_wblocks(sb, NULL, 1);
@@ -2249,6 +2259,7 @@ int logfs_init_rw(struct super_block *sb)
int min_fill = 3 * super->s_no_blocks;
INIT_LIST_HEAD(&super->s_object_alias);
+ INIT_LIST_HEAD(&super->s_writeback_list);
mutex_init(&super->s_write_mutex);
super->s_block_pool = mempool_create_kmalloc_pool(min_fill,
sizeof(struct logfs_block));
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index f77ce2b..a9657af 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -67,7 +67,7 @@ static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
return page;
}
-void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
int use_filler)
{
pgoff_t index = ofs >> PAGE_SHIFT;
@@ -81,8 +81,10 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
copylen = min((ulong)len, PAGE_SIZE - offset);
page = get_mapping_page(area->a_sb, index, use_filler);
- SetPageUptodate(page);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
BUG_ON(!page); /* FIXME: reserve a pool */
+ SetPageUptodate(page);
memcpy(page_address(page) + offset, buf, copylen);
SetPagePrivate(page);
page_cache_release(page);
@@ -92,6 +94,7 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
offset = 0;
index++;
} while (len);
+ return 0;
}
static void pad_partial_page(struct logfs_area *area)
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 5866ee6..d651e10 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -138,10 +138,14 @@ static int logfs_sb_set(struct super_block *sb, void *_super)
sb->s_fs_info = super;
sb->s_mtd = super->s_mtd;
sb->s_bdev = super->s_bdev;
+#ifdef CONFIG_BLOCK
if (sb->s_bdev)
sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+#endif
+#ifdef CONFIG_MTD
if (sb->s_mtd)
sb->s_bdi = sb->s_mtd->backing_dev_info;
+#endif
return 0;
}
@@ -333,27 +337,27 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
goto fail;
sb->s_root = d_alloc_root(rootdir);
- if (!sb->s_root)
- goto fail2;
+ if (!sb->s_root) {
+ iput(rootdir);
+ goto fail;
+ }
super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
if (!super->s_erase_page)
- goto fail2;
+ goto fail;
memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE);
/* FIXME: check for read-only mounts */
err = logfs_make_writeable(sb);
if (err)
- goto fail3;
+ goto fail1;
log_super("LogFS: Finished mounting\n");
simple_set_mnt(mnt, sb);
return 0;
-fail3:
+fail1:
__free_page(super->s_erase_page);
-fail2:
- iput(rootdir);
fail:
iput(logfs_super(sb)->s_master_inode);
return -EIO;
@@ -382,7 +386,7 @@ static struct page *find_super_block(struct super_block *sb)
if (!first || IS_ERR(first))
return NULL;
last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
- if (!last || IS_ERR(first)) {
+ if (!last || IS_ERR(last)) {
page_cache_release(first);
return NULL;
}
@@ -413,7 +417,7 @@ static int __logfs_read_sb(struct super_block *sb)
page = find_super_block(sb);
if (!page)
- return -EIO;
+ return -EINVAL;
ds = page_address(page);
super->s_size = be64_to_cpu(ds->ds_filesystem_size);
diff --git a/fs/namei.c b/fs/namei.c
index a7dce91..b86b96f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1641,7 +1641,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
if (nd->last.name[nd->last.len]) {
if (open_flag & O_CREAT)
goto exit;
- nd->flags |= LOOKUP_DIRECTORY;
+ nd->flags |= LOOKUP_DIRECTORY | LOOKUP_FOLLOW;
}
/* just plain open? */
@@ -1830,6 +1830,8 @@ reval:
}
if (open_flag & O_DIRECTORY)
nd.flags |= LOOKUP_DIRECTORY;
+ if (!(open_flag & O_NOFOLLOW))
+ nd.flags |= LOOKUP_FOLLOW;
filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
while (unlikely(!filp)) { /* trailing symlink */
struct path holder;
@@ -1837,7 +1839,7 @@ reval:
void *cookie;
error = -ELOOP;
/* S_ISDIR part is a temporary automount kludge */
- if ((open_flag & O_NOFOLLOW) && !S_ISDIR(inode->i_mode))
+ if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(inode->i_mode))
goto exit_dput;
if (count++ == 32)
goto exit_dput;
@@ -2174,8 +2176,10 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
error = security_inode_rmdir(dir, dentry);
if (!error) {
error = dir->i_op->rmdir(dir, dentry);
- if (!error)
+ if (!error) {
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
+ }
}
}
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -2259,7 +2263,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!error) {
error = dir->i_op->unlink(dir, dentry);
if (!error)
- dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
}
}
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -2570,17 +2574,20 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
return error;
target = new_dentry->d_inode;
- if (target) {
+ if (target)
mutex_lock(&target->i_mutex);
- dentry_unhash(new_dentry);
- }
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
error = -EBUSY;
- else
+ else {
+ if (target)
+ dentry_unhash(new_dentry);
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
+ }
if (target) {
- if (!error)
+ if (!error) {
target->i_flags |= S_DEAD;
+ dont_mount(new_dentry);
+ }
mutex_unlock(&target->i_mutex);
if (d_unhashed(new_dentry))
d_rehash(new_dentry);
@@ -2612,7 +2619,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
if (!error) {
if (target)
- target->i_flags |= S_DEAD;
+ dont_mount(new_dentry);
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
d_move(old_dentry, new_dentry);
}
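The namei.c hunks make LOOKUP_FOLLOW an explicit nameidata flag, set whenever O_NOFOLLOW is absent, instead of rechecking open_flag at the trailing-symlink step. The user-visible contract is unchanged and easy to demonstrate from userspace; a small runnable sketch (the paths are placeholders):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd;

	if (symlink("/etc/hostname", "demo-link") < 0 && errno != EEXIST)
		return 1;

	fd = open("demo-link", O_RDONLY);	/* trailing symlink followed */
	if (fd >= 0)
		close(fd);

	fd = open("demo-link", O_RDONLY | O_NOFOLLOW);
	if (fd < 0 && errno == ELOOP)
		printf("O_NOFOLLOW refused the trailing symlink\n");

	unlink("demo-link");
	return 0;
}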
diff --git a/fs/namespace.c b/fs/namespace.c
index 8174c8a..88058de 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -628,7 +628,6 @@ repeat:
mnt->mnt_pinned = 0;
spin_unlock(&vfsmount_lock);
acct_auto_close_mnt(mnt);
- security_sb_umount_close(mnt);
goto repeat;
}
}
@@ -1117,8 +1116,6 @@ static int do_umount(struct vfsmount *mnt, int flags)
retval = 0;
}
spin_unlock(&vfsmount_lock);
- if (retval)
- security_sb_umount_busy(mnt);
up_write(&namespace_sem);
release_mounts(&umount_list);
return retval;
@@ -1432,20 +1429,13 @@ static int graft_tree(struct vfsmount *mnt, struct path *path)
err = -ENOENT;
mutex_lock(&path->dentry->d_inode->i_mutex);
- if (IS_DEADDIR(path->dentry->d_inode))
- goto out_unlock;
-
- err = security_sb_check_sb(mnt, path);
- if (err)
+ if (cant_mount(path->dentry))
goto out_unlock;
- err = -ENOENT;
if (!d_unlinked(path->dentry))
err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
mutex_unlock(&path->dentry->d_inode->i_mutex);
- if (!err)
- security_sb_post_addmount(mnt, path);
return err;
}
@@ -1581,8 +1571,6 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
}
up_write(&sb->s_umount);
if (!err) {
- security_sb_post_remount(path->mnt, flags, data);
-
spin_lock(&vfsmount_lock);
touch_mnt_namespace(path->mnt->mnt_ns);
spin_unlock(&vfsmount_lock);
@@ -1623,7 +1611,7 @@ static int do_move_mount(struct path *path, char *old_name)
err = -ENOENT;
mutex_lock(&path->dentry->d_inode->i_mutex);
- if (IS_DEADDIR(path->dentry->d_inode))
+ if (cant_mount(path->dentry))
goto out1;
if (d_unlinked(path->dentry))
@@ -2234,7 +2222,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (!check_mnt(root.mnt))
goto out2;
error = -ENOENT;
- if (IS_DEADDIR(new.dentry->d_inode))
+ if (cant_mount(old.dentry))
goto out2;
if (d_unlinked(new.dentry))
goto out2;
@@ -2277,7 +2265,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
touch_mnt_namespace(current->nsproxy->mnt_ns);
spin_unlock(&vfsmount_lock);
chroot_fs_refs(&root, &new);
- security_sb_post_pivotroot(&root, &new);
error = 0;
path_put(&root_parent);
path_put(&parent_path);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a8766c4..7ec9b34 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -934,7 +934,6 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
}
fsinfo.fattr = fattr;
- nfs_fattr_init(fattr);
error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
if (error < 0)
goto out_error;
@@ -966,6 +965,8 @@ out_error:
static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
{
target->flags = source->flags;
+ target->rsize = source->rsize;
+ target->wsize = source->wsize;
target->acregmin = source->acregmin;
target->acregmax = source->acregmax;
target->acdirmin = source->acdirmin;
@@ -1045,13 +1046,18 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
struct nfs_fh *mntfh)
{
struct nfs_server *server;
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
int error;
server = nfs_alloc_server();
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto error;
+
/* Get a client representation */
error = nfs_init_server(server, data);
if (error < 0)
@@ -1062,7 +1068,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
/* Probe the root fh to retrieve its FSID */
- error = nfs_probe_fsinfo(server, mntfh, &fattr);
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto error;
if (server->nfs_client->rpc_ops->version == 3) {
@@ -1075,14 +1081,14 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
server->namelen = NFS2_MAXNAMLEN;
}
- if (!(fattr.valid & NFS_ATTR_FATTR)) {
- error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
+ if (!(fattr->valid & NFS_ATTR_FATTR)) {
+ error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
if (error < 0) {
dprintk("nfs_create_server: getattr error = %d\n", -error);
goto error;
}
}
- memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid));
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
dprintk("Server FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
@@ -1094,9 +1100,11 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
spin_unlock(&nfs_client_lock);
server->mount_time = jiffies;
+ nfs_free_fattr(fattr);
return server;
error:
+ nfs_free_fattr(fattr);
nfs_free_server(server);
return ERR_PTR(error);
}
@@ -1338,7 +1346,7 @@ error:
struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
struct nfs_fh *mntfh)
{
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
struct nfs_server *server;
int error;
@@ -1348,6 +1356,11 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto error;
+
/* set up the general RPC client */
error = nfs4_init_server(server, data);
if (error < 0)
@@ -1362,7 +1375,7 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
goto error;
/* Probe the root fh to retrieve its FSID */
- error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
+ error = nfs4_get_rootfh(server, mntfh);
if (error < 0)
goto error;
@@ -1373,7 +1386,7 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
nfs4_session_set_rwsize(server);
- error = nfs_probe_fsinfo(server, mntfh, &fattr);
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto error;
@@ -1387,9 +1400,11 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
server->mount_time = jiffies;
dprintk("<-- nfs4_create_server() = %p\n", server);
+ nfs_free_fattr(fattr);
return server;
error:
+ nfs_free_fattr(fattr);
nfs_free_server(server);
dprintk("<-- nfs4_create_server() = error %d\n", error);
return ERR_PTR(error);
@@ -1403,7 +1418,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
{
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
int error;
dprintk("--> nfs4_create_referral_server()\n");
@@ -1412,6 +1427,11 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto error;
+
parent_server = NFS_SB(data->sb);
parent_client = parent_server->nfs_client;
@@ -1441,12 +1461,12 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
/* Probe the root fh to retrieve its FSID and filehandle */
- error = nfs4_path_walk(server, mntfh, data->mnt_path);
+ error = nfs4_get_rootfh(server, mntfh);
if (error < 0)
goto error;
/* probe the filesystem info for this server filesystem */
- error = nfs_probe_fsinfo(server, mntfh, &fattr);
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto error;
@@ -1464,10 +1484,12 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
server->mount_time = jiffies;
+ nfs_free_fattr(fattr);
dprintk("<-- nfs_create_referral_server() = %p\n", server);
return server;
error:
+ nfs_free_fattr(fattr);
nfs_free_server(server);
dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
return ERR_PTR(error);
@@ -1483,7 +1505,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
struct nfs_fattr *fattr)
{
struct nfs_server *server;
- struct nfs_fattr fattr_fsinfo;
+ struct nfs_fattr *fattr_fsinfo;
int error;
dprintk("--> nfs_clone_server(,%llx:%llx,)\n",
@@ -1494,6 +1516,11 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr_fsinfo = nfs_alloc_fattr();
+ if (fattr_fsinfo == NULL)
+ goto out_free_server;
+
/* Copy data from the source */
server->nfs_client = source->nfs_client;
atomic_inc(&server->nfs_client->cl_count);
@@ -1510,7 +1537,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
nfs_init_server_aclclient(server);
/* probe the filesystem info for this server filesystem */
- error = nfs_probe_fsinfo(server, fh, &fattr_fsinfo);
+ error = nfs_probe_fsinfo(server, fh, fattr_fsinfo);
if (error < 0)
goto out_free_server;
@@ -1532,10 +1559,12 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
server->mount_time = jiffies;
+ nfs_free_fattr(fattr_fsinfo);
dprintk("<-- nfs_clone_server() = %p\n", server);
return server;
out_free_server:
+ nfs_free_fattr(fattr_fsinfo);
nfs_free_server(server);
dprintk("<-- nfs_clone_server() = error %d\n", error);
return ERR_PTR(error);
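Every hunk in client.c applies the same transformation: struct nfs_fattr, which is large, moves off the kernel stack into nfs_alloc_fattr()/nfs_free_fattr()-managed memory, with -ENOMEM wired into the existing error paths. The skeleton of the pattern, with an invented worker function and the NFS private headers assumed:

/* Sketch only: do_probe() is a placeholder for the real worker. */
static int do_probe(struct nfs_server *server, struct nfs_fh *fh,
		    struct nfs_fattr *fattr);

static int probe_pattern(struct nfs_server *server, struct nfs_fh *fh)
{
	struct nfs_fattr *fattr;
	int error = -ENOMEM;

	fattr = nfs_alloc_fattr();		/* heap, not stack */
	if (fattr == NULL)
		goto out;
	error = do_probe(server, fh, fattr);
out:
	nfs_free_fattr(fattr);			/* kfree-based; NULL-safe */
	return error;
}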
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1567124..3016345 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -24,6 +24,8 @@
static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
+ if (delegation->cred)
+ put_rpccred(delegation->cred);
kfree(delegation);
}
@@ -36,13 +38,7 @@ static void nfs_free_delegation_callback(struct rcu_head *head)
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
- struct rpc_cred *cred;
-
- cred = rcu_dereference(delegation->cred);
- rcu_assign_pointer(delegation->cred, NULL);
call_rcu(&delegation->rcu, nfs_free_delegation_callback);
- if (cred)
- put_rpccred(cred);
}
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
@@ -129,21 +125,35 @@ again:
*/
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
- struct nfs_delegation *delegation = NFS_I(inode)->delegation;
- struct rpc_cred *oldcred;
+ struct nfs_delegation *delegation;
+ struct rpc_cred *oldcred = NULL;
- if (delegation == NULL)
- return;
- memcpy(delegation->stateid.data, res->delegation.data,
- sizeof(delegation->stateid.data));
- delegation->type = res->delegation_type;
- delegation->maxsize = res->maxsize;
- oldcred = delegation->cred;
- delegation->cred = get_rpccred(cred);
- clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
- NFS_I(inode)->delegation_state = delegation->type;
- smp_wmb();
- put_rpccred(oldcred);
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation != NULL) {
+ spin_lock(&delegation->lock);
+ if (delegation->inode != NULL) {
+ memcpy(delegation->stateid.data, res->delegation.data,
+ sizeof(delegation->stateid.data));
+ delegation->type = res->delegation_type;
+ delegation->maxsize = res->maxsize;
+ oldcred = delegation->cred;
+ delegation->cred = get_rpccred(cred);
+ clear_bit(NFS_DELEGATION_NEED_RECLAIM,
+ &delegation->flags);
+ NFS_I(inode)->delegation_state = delegation->type;
+ spin_unlock(&delegation->lock);
+ put_rpccred(oldcred);
+ rcu_read_unlock();
+ } else {
+ /* We appear to have raced with a delegation return. */
+ spin_unlock(&delegation->lock);
+ rcu_read_unlock();
+ nfs_inode_set_delegation(inode, cred, res);
+ }
+ } else {
+ rcu_read_unlock();
+ }
}
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
@@ -166,9 +176,13 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
return inode;
}
-static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
+static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+ const nfs4_stateid *stateid,
+ struct nfs_client *clp)
{
- struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
+ struct nfs_delegation *delegation =
+ rcu_dereference_protected(nfsi->delegation,
+ lockdep_is_held(&clp->cl_lock));
if (delegation == NULL)
goto nomatch;
@@ -195,11 +209,11 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_inode *nfsi = NFS_I(inode);
- struct nfs_delegation *delegation;
+ struct nfs_delegation *delegation, *old_delegation;
struct nfs_delegation *freeme = NULL;
int status = 0;
- delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
+ delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
if (delegation == NULL)
return -ENOMEM;
memcpy(delegation->stateid.data, res->delegation.data,
@@ -213,10 +227,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
spin_lock_init(&delegation->lock);
spin_lock(&clp->cl_lock);
- if (rcu_dereference(nfsi->delegation) != NULL) {
- if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
- sizeof(delegation->stateid)) == 0 &&
- delegation->type == nfsi->delegation->type) {
+ old_delegation = rcu_dereference_protected(nfsi->delegation,
+ lockdep_is_held(&clp->cl_lock));
+ if (old_delegation != NULL) {
+ if (memcmp(&delegation->stateid, &old_delegation->stateid,
+ sizeof(old_delegation->stateid)) == 0 &&
+ delegation->type == old_delegation->type) {
goto out;
}
/*
@@ -226,12 +242,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
dfprintk(FILE, "%s: server %s handed out "
"a duplicate delegation!\n",
__func__, clp->cl_hostname);
- if (delegation->type <= nfsi->delegation->type) {
+ if (delegation->type <= old_delegation->type) {
freeme = delegation;
delegation = NULL;
goto out;
}
- freeme = nfs_detach_delegation_locked(nfsi, NULL);
+ freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
}
list_add_rcu(&delegation->super_list, &clp->cl_delegations);
nfsi->delegation_state = delegation->type;
@@ -301,7 +317,7 @@ restart:
if (inode == NULL)
continue;
spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
+ delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation != NULL) {
@@ -330,9 +346,9 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
- if (rcu_dereference(nfsi->delegation) != NULL) {
+ if (rcu_access_pointer(nfsi->delegation) != NULL) {
spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(nfsi, NULL);
+ delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
spin_unlock(&clp->cl_lock);
if (delegation != NULL)
nfs_do_return_delegation(inode, delegation, 0);
@@ -346,9 +362,9 @@ int nfs_inode_return_delegation(struct inode *inode)
struct nfs_delegation *delegation;
int err = 0;
- if (rcu_dereference(nfsi->delegation) != NULL) {
+ if (rcu_access_pointer(nfsi->delegation) != NULL) {
spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(nfsi, NULL);
+ delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
spin_unlock(&clp->cl_lock);
if (delegation != NULL) {
nfs_msync_inode(inode);
@@ -526,7 +542,7 @@ restart:
if (inode == NULL)
continue;
spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
+ delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation != NULL)
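The delegation.c hunks tighten the RCU annotations: plain NULL checks become rcu_access_pointer(), which never dereferences, and dereferences made under clp->cl_lock become rcu_dereference_protected() with a lockdep_is_held() justification, so lockdep can verify the update-side lock is actually held. A generic sketch with invented names, not the NFS structures:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item { int v; };
static struct item *cur;
static DEFINE_SPINLOCK(update_lock);

static struct item *detach_item(void)
{
	struct item *p;

	spin_lock(&update_lock);
	p = rcu_dereference_protected(cur, lockdep_is_held(&update_lock));
	rcu_assign_pointer(cur, NULL);
	spin_unlock(&update_lock);
	return p;		/* free only after a grace period */
}

static int have_item(void)
{
	/* reads the pointer value without marking a dereference */
	return rcu_access_pointer(cur) != NULL;
}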
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index db3ad84..ee9a179 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -530,9 +530,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
struct nfs_entry my_entry;
- struct nfs_fh fh;
- struct nfs_fattr fattr;
- long res;
+ int res = -ENOMEM;
dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -554,9 +552,11 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
my_entry.cookie = my_entry.prev_cookie = 0;
my_entry.eof = 0;
- my_entry.fh = &fh;
- my_entry.fattr = &fattr;
- nfs_fattr_init(&fattr);
+ my_entry.fh = nfs_alloc_fhandle();
+ my_entry.fattr = nfs_alloc_fattr();
+ if (my_entry.fh == NULL || my_entry.fattr == NULL)
+ goto out_alloc_failed;
+
desc->entry = &my_entry;
nfs_block_sillyrename(dentry);
@@ -598,7 +598,10 @@ out:
nfs_unblock_sillyrename(dentry);
if (res > 0)
res = 0;
- dfprintk(FILE, "NFS: readdir(%s/%s) returns %ld\n",
+out_alloc_failed:
+ nfs_free_fattr(my_entry.fattr);
+ nfs_free_fhandle(my_entry.fh);
+ dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
res);
return res;
@@ -776,9 +779,9 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
struct inode *dir;
struct inode *inode;
struct dentry *parent;
+ struct nfs_fh *fhandle = NULL;
+ struct nfs_fattr *fattr = NULL;
int error;
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
parent = dget_parent(dentry);
dir = parent->d_inode;
@@ -811,14 +814,22 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
if (NFS_STALE(inode))
goto out_bad;
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
+ error = -ENOMEM;
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fhandle == NULL || fattr == NULL)
+ goto out_error;
+
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_bad;
- if (nfs_compare_fh(NFS_FH(inode), &fhandle))
+ if (nfs_compare_fh(NFS_FH(inode), fhandle))
goto out_bad;
- if ((error = nfs_refresh_inode(inode, &fattr)) != 0)
+ if ((error = nfs_refresh_inode(inode, fattr)) != 0)
goto out_bad;
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
out_set_verifier:
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
@@ -842,11 +853,21 @@ out_zap_parent:
shrink_dcache_parent(dentry);
}
d_drop(dentry);
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
__func__, dentry->d_parent->d_name.name,
dentry->d_name.name);
return 0;
+out_error:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ dput(parent);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name, error);
+ return error;
}
/*
@@ -911,9 +932,9 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
struct dentry *res;
struct dentry *parent;
struct inode *inode = NULL;
+ struct nfs_fh *fhandle = NULL;
+ struct nfs_fattr *fattr = NULL;
int error;
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
dfprintk(VFS, "NFS: lookup(%s/%s)\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
@@ -923,7 +944,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
goto out;
- res = ERR_PTR(-ENOMEM);
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
/*
@@ -936,17 +956,23 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
goto out;
}
+ res = ERR_PTR(-ENOMEM);
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fhandle == NULL || fattr == NULL)
+ goto out;
+
parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
nfs_block_sillyrename(parent);
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
res = ERR_PTR(error);
goto out_unblock_sillyrename;
}
- inode = nfs_fhget(dentry->d_sb, &fhandle, &fattr);
+ inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
res = (struct dentry *)inode;
if (IS_ERR(res))
goto out_unblock_sillyrename;
@@ -962,6 +988,8 @@ no_entry:
out_unblock_sillyrename:
nfs_unblock_sillyrename(parent);
out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
return res;
}
@@ -1052,7 +1080,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
struct inode *dir;
int openflags, ret = 0;
- if (!is_atomic_open(nd))
+ if (!is_atomic_open(nd) || d_mountpoint(dentry))
goto no_open;
parent = dget_parent(dentry);
dir = parent->d_inode;
@@ -1669,28 +1697,33 @@ static void nfs_access_free_entry(struct nfs_access_entry *entry)
smp_mb__after_atomic_dec();
}
+static void nfs_access_free_list(struct list_head *head)
+{
+ struct nfs_access_entry *cache;
+
+ while (!list_empty(head)) {
+ cache = list_entry(head->next, struct nfs_access_entry, lru);
+ list_del(&cache->lru);
+ nfs_access_free_entry(cache);
+ }
+}
+
int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(head);
struct nfs_inode *nfsi;
struct nfs_access_entry *cache;
-restart:
+ if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+ return (nr_to_scan == 0) ? 0 : -1;
+
spin_lock(&nfs_access_lru_lock);
list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
- struct rw_semaphore *s_umount;
struct inode *inode;
if (nr_to_scan-- == 0)
break;
- s_umount = &nfsi->vfs_inode.i_sb->s_umount;
- if (!down_read_trylock(s_umount))
- continue;
- inode = igrab(&nfsi->vfs_inode);
- if (inode == NULL) {
- up_read(s_umount);
- continue;
- }
+ inode = &nfsi->vfs_inode;
spin_lock(&inode->i_lock);
if (list_empty(&nfsi->access_cache_entry_lru))
goto remove_lru_entry;
@@ -1704,61 +1737,47 @@ restart:
else {
remove_lru_entry:
list_del_init(&nfsi->access_cache_inode_lru);
+ smp_mb__before_clear_bit();
clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
+ smp_mb__after_clear_bit();
}
- spin_unlock(&inode->i_lock);
- spin_unlock(&nfs_access_lru_lock);
- iput(inode);
- up_read(s_umount);
- goto restart;
}
spin_unlock(&nfs_access_lru_lock);
- while (!list_empty(&head)) {
- cache = list_entry(head.next, struct nfs_access_entry, lru);
- list_del(&cache->lru);
- nfs_access_free_entry(cache);
- }
+ nfs_access_free_list(&head);
return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure;
}
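Freeing access-cache entries can recurse into the filesystem, so the reworked shrinker above refuses to scan unless the caller can tolerate GFP_KERNEL allocations, returning -1 for "could not make progress"; it also drops the old per-inode s_umount/igrab dance. A minimal shrinker with the same pre-shrink_control signature might look like this (all names hypothetical):

    #include <linux/mm.h>
    #include <linux/dcache.h>   /* sysctl_vfs_cache_pressure */

    static atomic_long_t demo_nr_entries = ATOMIC_LONG_INIT(0);

    static int demo_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
    {
        /* Teardown may re-enter the fs: only proceed under GFP_KERNEL. */
        if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
            return (nr_to_scan == 0) ? 0 : -1;

        while (nr_to_scan-- > 0) {
            /* ... unlink and free one cache entry here ... */
            atomic_long_dec(&demo_nr_entries);
        }

        /* Estimate of what remains, scaled by VFS cache pressure. */
        return (atomic_long_read(&demo_nr_entries) / 100) *
                sysctl_vfs_cache_pressure;
    }

    static struct shrinker demo_shrinker = {
        .shrink = demo_cache_shrinker,
        .seeks  = DEFAULT_SEEKS,
    };
    /* register_shrinker(&demo_shrinker) at init, unregister on exit */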
-static void __nfs_access_zap_cache(struct inode *inode)
+static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
{
- struct nfs_inode *nfsi = NFS_I(inode);
struct rb_root *root_node = &nfsi->access_cache;
- struct rb_node *n, *dispose = NULL;
+ struct rb_node *n;
struct nfs_access_entry *entry;
/* Unhook entries from the cache */
while ((n = rb_first(root_node)) != NULL) {
entry = rb_entry(n, struct nfs_access_entry, rb_node);
rb_erase(n, root_node);
- list_del(&entry->lru);
- n->rb_left = dispose;
- dispose = n;
+ list_move(&entry->lru, head);
}
nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
- spin_unlock(&inode->i_lock);
-
- /* Now kill them all! */
- while (dispose != NULL) {
- n = dispose;
- dispose = n->rb_left;
- nfs_access_free_entry(rb_entry(n, struct nfs_access_entry, rb_node));
- }
}
void nfs_access_zap_cache(struct inode *inode)
{
+ LIST_HEAD(head);
+
+ if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0)
+ return;
/* Remove from global LRU init */
- if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
- spin_lock(&nfs_access_lru_lock);
+ spin_lock(&nfs_access_lru_lock);
+ if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
list_del_init(&NFS_I(inode)->access_cache_inode_lru);
- spin_unlock(&nfs_access_lru_lock);
- }
spin_lock(&inode->i_lock);
- /* This will release the spinlock */
- __nfs_access_zap_cache(inode);
+ __nfs_access_zap_cache(NFS_I(inode), &head);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&nfs_access_lru_lock);
+ nfs_access_free_list(&head);
}
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, struct rpc_cred *cred)
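nfs_access_zap_cache() now collects doomed entries on a local list while holding nfs_access_lru_lock and i_lock, then frees them only after every lock is dropped; the old code unlocked inside __nfs_access_zap_cache() instead, which is what made the out_zap path below so fragile. The shape of the pattern, as a self-contained sketch:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_entry {
        struct list_head lru;
        /* ... payload ... */
    };

    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_cache);

    static void demo_zap_cache(void)
    {
        LIST_HEAD(head);
        struct demo_entry *e, *tmp;

        /* Detach everything while the lock is held... */
        spin_lock(&demo_lock);
        list_splice_init(&demo_cache, &head);
        spin_unlock(&demo_lock);

        /* ...then run the per-entry teardown, which may take other
         * locks or do real work, without holding demo_lock. */
        list_for_each_entry_safe(e, tmp, &head, lru) {
            list_del(&e->lru);
            kfree(e);
        }
    }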
@@ -1809,8 +1828,8 @@ out_stale:
nfs_access_free_entry(cache);
return -ENOENT;
out_zap:
- /* This will release the spinlock */
- __nfs_access_zap_cache(inode);
+ spin_unlock(&inode->i_lock);
+ nfs_access_zap_cache(inode);
return -ENOENT;
}
@@ -1865,9 +1884,11 @@ static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *s
smp_mb__after_atomic_inc();
/* Add inode to global LRU list */
- if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
+ if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
spin_lock(&nfs_access_lru_lock);
- list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
+ if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
+ list_add_tail(&NFS_I(inode)->access_cache_inode_lru,
+ &nfs_access_lru_list);
spin_unlock(&nfs_access_lru_lock);
}
}
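The add-to-LRU path above now re-tests the flag under nfs_access_lru_lock: the cheap lockless test_bit() keeps the common case lock-free, while the locked test_and_set_bit() guarantees the flag and the list membership can only change together. Distilled, with hypothetical names:

    #include <linux/bitops.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    #define DEMO_ON_LRU 0

    static DEFINE_SPINLOCK(demo_lru_lock);
    static LIST_HEAD(demo_lru_list);

    struct demo_object {
        unsigned long flags;
        struct list_head lru;
    };

    static void demo_lru_add(struct demo_object *obj)
    {
        /* Fast path: skip the lock if the bit is already set. */
        if (!test_bit(DEMO_ON_LRU, &obj->flags)) {
            spin_lock(&demo_lru_lock);
            /* Re-check under the lock; only the winner links the
             * object, so bit and list stay in sync. */
            if (!test_and_set_bit(DEMO_ON_LRU, &obj->flags))
                list_add_tail(&obj->lru, &demo_lru_list);
            spin_unlock(&demo_lru_lock);
        }
    }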
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 8d965bd..cac96bc 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -161,14 +161,17 @@ static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
- if (server->flags & NFS_MOUNT_NOAC)
- goto force_reval;
+ if (nfs_have_delegated_attributes(inode))
+ goto out_noreval;
+
if (filp->f_flags & O_DIRECT)
goto force_reval;
- if (nfsi->npages != 0)
- return 0;
- if (!(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
- return 0;
+ if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ goto force_reval;
+ if (nfs_attribute_timeout(inode))
+ goto force_reval;
+out_noreval:
+ return 0;
force_reval:
return __nfs_revalidate_inode(server, inode);
}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index a6b16ed..ce153a6 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -467,7 +467,8 @@ int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
struct list_head *pages,
unsigned *nr_pages)
{
- int ret, npages = *nr_pages;
+ unsigned npages = *nr_pages;
+ int ret;
dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
NFS_I(inode)->fscache, npages, inode);
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index b35d2a6..7428f7d 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -78,159 +78,94 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
{
struct nfs_server *server = NFS_SB(sb);
struct nfs_fsinfo fsinfo;
- struct nfs_fattr fattr;
- struct dentry *mntroot;
+ struct dentry *ret;
struct inode *inode;
int error;
/* get the actual root for this mount */
- fsinfo.fattr = &fattr;
+ fsinfo.fattr = nfs_alloc_fattr();
+ if (fsinfo.fattr == NULL)
+ return ERR_PTR(-ENOMEM);
error = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
if (error < 0) {
dprintk("nfs_get_root: getattr error = %d\n", -error);
- return ERR_PTR(error);
+ ret = ERR_PTR(error);
+ goto out;
}
inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
- return ERR_CAST(inode);
+ ret = ERR_CAST(inode);
+ goto out;
}
error = nfs_superblock_set_dummy_root(sb, inode);
- if (error != 0)
- return ERR_PTR(error);
+ if (error != 0) {
+ ret = ERR_PTR(error);
+ goto out;
+ }
/* root dentries normally start off anonymous and get spliced in later
* if the dentry tree reaches them; however if the dentry already
* exists, we'll pick it up at this point and use it as the root
*/
- mntroot = d_obtain_alias(inode);
- if (IS_ERR(mntroot)) {
+ ret = d_obtain_alias(inode);
+ if (IS_ERR(ret)) {
dprintk("nfs_get_root: get root dentry failed\n");
- return mntroot;
+ goto out;
}
- security_d_instantiate(mntroot, inode);
-
- if (!mntroot->d_op)
- mntroot->d_op = server->nfs_client->rpc_ops->dentry_ops;
+ security_d_instantiate(ret, inode);
- return mntroot;
+ if (ret->d_op == NULL)
+ ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
+out:
+ nfs_free_fattr(fsinfo.fattr);
+ return ret;
}
#ifdef CONFIG_NFS_V4
-/*
- * Do a simple pathwalk from the root FH of the server to the nominated target
- * of the mountpoint
- * - give error on symlinks
- * - give error on ".." occurring in the path
- * - follow traversals
- */
-int nfs4_path_walk(struct nfs_server *server,
- struct nfs_fh *mntfh,
- const char *path)
+int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
{
struct nfs_fsinfo fsinfo;
- struct nfs_fattr fattr;
- struct nfs_fh lastfh;
- struct qstr name;
- int ret;
+ int ret = -ENOMEM;
- dprintk("--> nfs4_path_walk(,,%s)\n", path);
+ dprintk("--> nfs4_get_rootfh()\n");
- fsinfo.fattr = &fattr;
- nfs_fattr_init(&fattr);
-
- /* Eat leading slashes */
- while (*path == '/')
- path++;
+ fsinfo.fattr = nfs_alloc_fattr();
+ if (fsinfo.fattr == NULL)
+ goto out;
/* Start by getting the root filehandle from the server */
ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
if (ret < 0) {
- dprintk("nfs4_get_root: getroot error = %d\n", -ret);
- return ret;
+ dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
+ goto out;
}
- if (!S_ISDIR(fattr.mode)) {
- printk(KERN_ERR "nfs4_get_root:"
+ if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_MODE)
+ || !S_ISDIR(fsinfo.fattr->mode)) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
" getroot encountered non-directory\n");
- return -ENOTDIR;
+ ret = -ENOTDIR;
+ goto out;
}
- /* FIXME: It is quite valid for the server to return a referral here */
- if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
- printk(KERN_ERR "nfs4_get_root:"
+ if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
" getroot obtained referral\n");
- return -EREMOTE;
- }
-
-next_component:
- dprintk("Next: %s\n", path);
-
- /* extract the next bit of the path */
- if (!*path)
- goto path_walk_complete;
-
- name.name = path;
- while (*path && *path != '/')
- path++;
- name.len = path - (const char *) name.name;
-
- if (name.len > NFS4_MAXNAMLEN)
- return -ENAMETOOLONG;
-
-eat_dot_dir:
- while (*path == '/')
- path++;
-
- if (path[0] == '.' && (path[1] == '/' || !path[1])) {
- path += 2;
- goto eat_dot_dir;
- }
-
- /* FIXME: Why shouldn't the user be able to use ".." in the path? */
- if (path[0] == '.' && path[1] == '.' && (path[2] == '/' || !path[2])
- ) {
- printk(KERN_ERR "nfs4_get_root:"
- " Mount path contains reference to \"..\"\n");
- return -EINVAL;
+ ret = -EREMOTE;
+ goto out;
}
- /* lookup the next FH in the sequence */
- memcpy(&lastfh, mntfh, sizeof(lastfh));
-
- dprintk("LookupFH: %*.*s [%s]\n", name.len, name.len, name.name, path);
-
- ret = server->nfs_client->rpc_ops->lookupfh(server, &lastfh, &name,
- mntfh, &fattr);
- if (ret < 0) {
- dprintk("nfs4_get_root: getroot error = %d\n", -ret);
- return ret;
- }
-
- if (!S_ISDIR(fattr.mode)) {
- printk(KERN_ERR "nfs4_get_root:"
- " lookupfh encountered non-directory\n");
- return -ENOTDIR;
- }
-
- /* FIXME: Referrals are quite valid here too */
- if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
- printk(KERN_ERR "nfs4_get_root:"
- " lookupfh obtained referral\n");
- return -EREMOTE;
- }
-
- goto next_component;
-
-path_walk_complete:
- memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid));
- dprintk("<-- nfs4_path_walk() = 0\n");
- return 0;
+ memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
+out:
+ nfs_free_fattr(fsinfo.fattr);
+ dprintk("<-- nfs4_get_rootfh() = %d\n", ret);
+ return ret;
}
/*
@@ -239,8 +174,8 @@ path_walk_complete:
struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
{
struct nfs_server *server = NFS_SB(sb);
- struct nfs_fattr fattr;
- struct dentry *mntroot;
+ struct nfs_fattr *fattr = NULL;
+ struct dentry *ret;
struct inode *inode;
int error;
@@ -254,40 +189,50 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
return ERR_PTR(error);
}
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ return ERR_PTR(-ENOMEM);
+
/* get the actual root for this mount */
- error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
+ error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
if (error < 0) {
dprintk("nfs_get_root: getattr error = %d\n", -error);
- return ERR_PTR(error);
+ ret = ERR_PTR(error);
+ goto out;
}
- inode = nfs_fhget(sb, mntfh, &fattr);
+ inode = nfs_fhget(sb, mntfh, fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
- return ERR_CAST(inode);
+ ret = ERR_CAST(inode);
+ goto out;
}
error = nfs_superblock_set_dummy_root(sb, inode);
- if (error != 0)
- return ERR_PTR(error);
+ if (error != 0) {
+ ret = ERR_PTR(error);
+ goto out;
+ }
/* root dentries normally start off anonymous and get spliced in later
* if the dentry tree reaches them; however if the dentry already
* exists, we'll pick it up at this point and use it as the root
*/
- mntroot = d_obtain_alias(inode);
- if (IS_ERR(mntroot)) {
+ ret = d_obtain_alias(inode);
+ if (IS_ERR(ret)) {
dprintk("nfs_get_root: get root dentry failed\n");
- return mntroot;
+ goto out;
}
- security_d_instantiate(mntroot, inode);
+ security_d_instantiate(ret, inode);
- if (!mntroot->d_op)
- mntroot->d_op = server->nfs_client->rpc_ops->dentry_ops;
+ if (ret->d_op == NULL)
+ ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
+out:
+ nfs_free_fattr(fattr);
dprintk("<-- nfs4_get_root()\n");
- return mntroot;
+ return ret;
}
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 50a56ed..099b351 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -393,8 +393,8 @@ int
nfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
- struct nfs_fattr fattr;
- int error;
+ struct nfs_fattr *fattr;
+ int error = -ENOMEM;
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
@@ -417,14 +417,20 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
filemap_write_and_wait(inode->i_mapping);
nfs_wb_all(inode);
}
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
/*
* Return any delegations if we're going to change ACLs
*/
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
nfs_inode_return_delegation(inode);
- error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
+ error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
if (error == 0)
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, fattr);
+ nfs_free_fattr(fattr);
+out:
return error;
}
@@ -682,7 +688,7 @@ int
__nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
int status = -ESTALE;
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr = NULL;
struct nfs_inode *nfsi = NFS_I(inode);
dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
@@ -693,8 +699,13 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
if (NFS_STALE(inode))
goto out;
+ status = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
+
nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
- status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), &fattr);
+ status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr);
if (status != 0) {
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n",
inode->i_sb->s_id,
@@ -707,7 +718,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
goto out;
}
- status = nfs_refresh_inode(inode, &fattr);
+ status = nfs_refresh_inode(inode, fattr);
if (status) {
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
inode->i_sb->s_id,
@@ -723,6 +734,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
(long long)NFS_FILEID(inode));
out:
+ nfs_free_fattr(fattr);
return status;
}
@@ -730,9 +742,14 @@ int nfs_attribute_timeout(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+}
+
+static int nfs_attribute_cache_expired(struct inode *inode)
+{
if (nfs_have_delegated_attributes(inode))
return 0;
- return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+ return nfs_attribute_timeout(inode);
}
/**
@@ -745,7 +762,7 @@ int nfs_attribute_timeout(struct inode *inode)
int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
if (!(NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATTR)
- && !nfs_attribute_timeout(inode))
+ && !nfs_attribute_cache_expired(inode))
return NFS_STALE(inode) ? -ESTALE : 0;
return __nfs_revalidate_inode(server, inode);
}
@@ -782,7 +799,8 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
int ret = 0;
if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
- || nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
+ || nfs_attribute_cache_expired(inode)
+ || NFS_STALE(inode)) {
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (ret < 0)
goto out;
@@ -916,6 +934,26 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
fattr->gencount = nfs_inc_attr_generation_counter();
}
+struct nfs_fattr *nfs_alloc_fattr(void)
+{
+ struct nfs_fattr *fattr;
+
+ fattr = kmalloc(sizeof(*fattr), GFP_NOFS);
+ if (fattr != NULL)
+ nfs_fattr_init(fattr);
+ return fattr;
+}
+
+struct nfs_fh *nfs_alloc_fhandle(void)
+{
+ struct nfs_fh *fh;
+
+ fh = kmalloc(sizeof(struct nfs_fh), GFP_NOFS);
+ if (fh != NULL)
+ fh->size = 0;
+ return fh;
+}
+
/**
* nfs_inode_attrs_need_update - check if the inode attributes need updating
* @inode - pointer to inode
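nfs_alloc_fattr() and nfs_alloc_fhandle() are the heart of this series: struct nfs_fattr and struct nfs_fh are large, and every hunk that used to declare them on the stack now uses these heap allocations instead. GFP_NOFS is the right mask because many of the call sites sit under fs locks or on writeback paths, where allocating with reclaim-through-the-fs enabled could deadlock. The resulting caller shape, as a kernel-style sketch (demo_revalidate() itself is made up; nfs_free_fattr() is the kfree()-based counterpart added alongside these helpers and tolerates NULL):

    #include <linux/nfs_fs.h>

    static int demo_revalidate(struct nfs_server *server, struct inode *inode)
    {
        struct nfs_fattr *fattr;
        int status = -ENOMEM;

        fattr = nfs_alloc_fattr(); /* kmalloc(GFP_NOFS) + nfs_fattr_init() */
        if (fattr == NULL)
            goto out;

        status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr);
        if (status == 0)
            status = nfs_refresh_inode(inode, fattr);

        nfs_free_fattr(fattr);
    out:
        return status;
    }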
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 11f82f0..d8bd619 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,9 +244,7 @@ extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *);
#ifdef CONFIG_NFS_V4
extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *);
-extern int nfs4_path_walk(struct nfs_server *server,
- struct nfs_fh *mntfh,
- const char *path);
+extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
#endif
/* read.c */
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index 1d8d5c8..c583248 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -36,14 +36,14 @@ static inline void nfs_inc_stats(const struct inode *inode,
static inline void nfs_add_server_stats(const struct nfs_server *server,
enum nfs_stat_bytecounters stat,
- unsigned long addend)
+ long addend)
{
this_cpu_add(server->io_stats->bytes[stat], addend);
}
static inline void nfs_add_stats(const struct inode *inode,
enum nfs_stat_bytecounters stat,
- unsigned long addend)
+ long addend)
{
nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
}
@@ -51,7 +51,7 @@ static inline void nfs_add_stats(const struct inode *inode,
#ifdef CONFIG_NFS_FSCACHE
static inline void nfs_add_fscache_stats(struct inode *inode,
enum nfs_stat_fscachecounters stat,
- unsigned long addend)
+ long addend)
{
this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
}
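The io-stats counters are 64-bit per-CPU values, and widening the addend from unsigned long to long suggests callers may pass negative adjustments. On 32-bit machines that distinction matters: a negative value passed through a 32-bit unsigned parameter is zero-extended into a huge positive one before it reaches the 64-bit counter. A userspace model of the failure and the fix:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t counter;    /* stands in for the 64-bit percpu stat */

    static void add_u32(uint32_t addend) { counter += addend; } /* old, on 32-bit */
    static void add_s64(int64_t addend)  { counter += addend; } /* fixed */

    int main(void)
    {
        counter = 1000;
        add_s64(-100);              /* sign-extends: counter == 900 */
        printf("signed:   %llu\n", (unsigned long long)counter);

        counter = 1000;
        add_u32((uint32_t)-100);    /* zero-extends: counter == 4294968196 */
        printf("unsigned: %llu\n", (unsigned long long)counter);
        return 0;
    }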
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 7888cf3..db6aa36 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -105,8 +105,8 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
struct vfsmount *mnt;
struct nfs_server *server = NFS_SERVER(dentry->d_inode);
struct dentry *parent;
- struct nfs_fh fh;
- struct nfs_fattr fattr;
+ struct nfs_fh *fh = NULL;
+ struct nfs_fattr *fattr = NULL;
int err;
dprintk("--> nfs_follow_mountpoint()\n");
@@ -115,6 +115,12 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
if (IS_ROOT(dentry))
goto out_err;
+ err = -ENOMEM;
+ fh = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fh == NULL || fattr == NULL)
+ goto out_err;
+
dprintk("%s: enter\n", __func__);
dput(nd->path.dentry);
nd->path.dentry = dget(dentry);
@@ -123,16 +129,16 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
parent = dget_parent(nd->path.dentry);
err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
&nd->path.dentry->d_name,
- &fh, &fattr);
+ fh, fattr);
dput(parent);
if (err != 0)
goto out_err;
- if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
+ if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
else
- mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
- &fattr);
+ mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, fh,
+ fattr);
err = PTR_ERR(mnt);
if (IS_ERR(mnt))
goto out_err;
@@ -151,6 +157,8 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
nd->path.dentry = dget(mnt->mnt_root);
schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fh);
dprintk("%s: done, returned %d\n", __func__, err);
dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index d150ae0..9f88c5f 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -185,7 +185,6 @@ static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_fattr fattr;
struct page *pages[NFSACL_MAXPAGES] = { };
struct nfs3_getaclargs args = {
.fh = NFS_FH(inode),
@@ -193,7 +192,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
.pages = pages,
};
struct nfs3_getaclres res = {
- .fattr = &fattr,
+ 0
};
struct rpc_message msg = {
.rpc_argp = &args,
@@ -228,7 +227,10 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
dprintk("NFS call getacl\n");
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_GETACL];
- nfs_fattr_init(&fattr);
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ return ERR_PTR(-ENOMEM);
+
status = rpc_call_sync(server->client_acl, &msg, 0);
dprintk("NFS reply getacl: %d\n", status);
@@ -238,7 +240,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
switch (status) {
case 0:
- status = nfs_refresh_inode(inode, &fattr);
+ status = nfs_refresh_inode(inode, res.fattr);
break;
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
@@ -278,6 +280,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
getout:
posix_acl_release(res.acl_access);
posix_acl_release(res.acl_default);
+ nfs_free_fattr(res.fattr);
if (status != 0) {
posix_acl_release(acl);
@@ -290,7 +293,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
struct page *pages[NFSACL_MAXPAGES];
struct nfs3_setaclargs args = {
.inode = inode,
@@ -335,8 +338,13 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
}
dprintk("NFS call setacl\n");
+ status = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out_freepages;
+
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_SETACL];
- nfs_fattr_init(&fattr);
+ msg.rpc_resp = fattr;
status = rpc_call_sync(server->client_acl, &msg, 0);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
@@ -344,7 +352,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
switch (status) {
case 0:
- status = nfs_refresh_inode(inode, &fattr);
+ status = nfs_refresh_inode(inode, fattr);
nfs3_cache_acls(inode, acl, dfacl);
break;
case -EPFNOSUPPORT:
@@ -355,6 +363,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
case -ENOTSUPP:
status = -EOPNOTSUPP;
}
+ nfs_free_fattr(fattr);
out_freepages:
while (args.npages != 0) {
args.npages--;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e701002..fabb4f2 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -144,14 +144,12 @@ static int
nfs3_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
- struct nfs_fattr dir_attr;
struct nfs3_diropargs arg = {
.fh = NFS_FH(dir),
.name = name->name,
.len = name->len
};
struct nfs3_diropres res = {
- .dir_attr = &dir_attr,
.fh = fhandle,
.fattr = fattr
};
@@ -163,29 +161,30 @@ nfs3_proc_lookup(struct inode *dir, struct qstr *name,
int status;
dprintk("NFS call lookup %s\n", name->name);
- nfs_fattr_init(&dir_attr);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ return -ENOMEM;
+
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_refresh_inode(dir, res.dir_attr);
if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR)) {
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
msg.rpc_argp = fhandle;
msg.rpc_resp = fattr;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
}
+ nfs_free_fattr(res.dir_attr);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
- struct nfs_fattr fattr;
struct nfs3_accessargs arg = {
.fh = NFS_FH(inode),
};
- struct nfs3_accessres res = {
- .fattr = &fattr,
- };
+ struct nfs3_accessres res;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS],
.rpc_argp = &arg,
@@ -193,7 +192,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
.rpc_cred = entry->cred,
};
int mode = entry->mask;
- int status;
+ int status = -ENOMEM;
dprintk("NFS call access\n");
@@ -210,9 +209,13 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
if (mode & MAY_EXEC)
arg.access |= NFS3_ACCESS_EXECUTE;
}
- nfs_fattr_init(&fattr);
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, res.fattr);
if (status == 0) {
entry->mask = 0;
if (res.access & NFS3_ACCESS_READ)
@@ -222,6 +225,8 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
if (res.access & (NFS3_ACCESS_LOOKUP|NFS3_ACCESS_EXECUTE))
entry->mask |= MAY_EXEC;
}
+ nfs_free_fattr(res.fattr);
+out:
dprintk("NFS reply access: %d\n", status);
return status;
}
@@ -229,7 +234,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
static int nfs3_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
struct nfs3_readlinkargs args = {
.fh = NFS_FH(inode),
.pgbase = pgbase,
@@ -239,14 +244,19 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page,
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_READLINK],
.rpc_argp = &args,
- .rpc_resp = &fattr,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call readlink\n");
- nfs_fattr_init(&fattr);
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
+ msg.rpc_resp = fattr;
+
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, fattr);
+ nfs_free_fattr(fattr);
+out:
dprintk("NFS reply readlink: %d\n", status);
return status;
}
@@ -396,12 +406,17 @@ nfs3_proc_remove(struct inode *dir, struct qstr *name)
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call remove %s\n", name->name);
- nfs_fattr_init(&res.dir_attr);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_post_op_update_inode(dir, &res.dir_attr);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_free_fattr(res.dir_attr);
+out:
dprintk("NFS reply remove: %d\n", status);
return status;
}
@@ -419,7 +434,7 @@ nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs3_async_handle_jukebox(task, dir))
return 0;
res = task->tk_msg.rpc_resp;
- nfs_post_op_update_inode(dir, &res->dir_attr);
+ nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
@@ -427,7 +442,6 @@ static int
nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
- struct nfs_fattr old_dir_attr, new_dir_attr;
struct nfs3_renameargs arg = {
.fromfh = NFS_FH(old_dir),
.fromname = old_name->name,
@@ -436,23 +450,27 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
.toname = new_name->name,
.tolen = new_name->len
};
- struct nfs3_renameres res = {
- .fromattr = &old_dir_attr,
- .toattr = &new_dir_attr
- };
+ struct nfs3_renameres res;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_RENAME],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
- nfs_fattr_init(&old_dir_attr);
- nfs_fattr_init(&new_dir_attr);
+
+ res.fromattr = nfs_alloc_fattr();
+ res.toattr = nfs_alloc_fattr();
+ if (res.fromattr == NULL || res.toattr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
- nfs_post_op_update_inode(old_dir, &old_dir_attr);
- nfs_post_op_update_inode(new_dir, &new_dir_attr);
+ nfs_post_op_update_inode(old_dir, res.fromattr);
+ nfs_post_op_update_inode(new_dir, res.toattr);
+out:
+ nfs_free_fattr(res.toattr);
+ nfs_free_fattr(res.fromattr);
dprintk("NFS reply rename: %d\n", status);
return status;
}
@@ -460,30 +478,32 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
static int
nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
- struct nfs_fattr dir_attr, fattr;
struct nfs3_linkargs arg = {
.fromfh = NFS_FH(inode),
.tofh = NFS_FH(dir),
.toname = name->name,
.tolen = name->len
};
- struct nfs3_linkres res = {
- .dir_attr = &dir_attr,
- .fattr = &fattr
- };
+ struct nfs3_linkres res;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_LINK],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call link %s\n", name->name);
- nfs_fattr_init(&dir_attr);
- nfs_fattr_init(&fattr);
+ res.fattr = nfs_alloc_fattr();
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.fattr == NULL || res.dir_attr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- nfs_post_op_update_inode(dir, &dir_attr);
- nfs_post_op_update_inode(inode, &fattr);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_post_op_update_inode(inode, res.fattr);
+out:
+ nfs_free_fattr(res.dir_attr);
+ nfs_free_fattr(res.fattr);
dprintk("NFS reply link: %d\n", status);
return status;
}
@@ -554,7 +574,7 @@ out:
static int
nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
{
- struct nfs_fattr dir_attr;
+ struct nfs_fattr *dir_attr;
struct nfs3_diropargs arg = {
.fh = NFS_FH(dir),
.name = name->name,
@@ -563,14 +583,19 @@ nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_RMDIR],
.rpc_argp = &arg,
- .rpc_resp = &dir_attr,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call rmdir %s\n", name->name);
- nfs_fattr_init(&dir_attr);
+ dir_attr = nfs_alloc_fattr();
+ if (dir_attr == NULL)
+ goto out;
+
+ msg.rpc_resp = dir_attr;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_post_op_update_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, dir_attr);
+ nfs_free_fattr(dir_attr);
+out:
dprintk("NFS reply rmdir: %d\n", status);
return status;
}
@@ -589,7 +614,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page *page, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
- struct nfs_fattr dir_attr;
__be32 *verf = NFS_COOKIEVERF(dir);
struct nfs3_readdirargs arg = {
.fh = NFS_FH(dir),
@@ -600,7 +624,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
.pages = &page
};
struct nfs3_readdirres res = {
- .dir_attr = &dir_attr,
.verf = verf,
.plus = plus
};
@@ -610,7 +633,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
.rpc_resp = &res,
.rpc_cred = cred
};
- int status;
+ int status = -ENOMEM;
if (plus)
msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];
@@ -618,12 +641,17 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
dprintk("NFS call readdir%s %d\n",
plus? "plus" : "", (unsigned int) cookie);
- nfs_fattr_init(&dir_attr);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_invalidate_atime(dir);
+ nfs_refresh_inode(dir, res.dir_attr);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_free_fattr(res.dir_attr);
+out:
dprintk("NFS reply readdir: %d\n", status);
return status;
}
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 56a86f6..75dcfc7 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -762,7 +762,7 @@ nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
static int
nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
{
- return nfs3_xdr_wccstat(req, p, &res->dir_attr);
+ return nfs3_xdr_wccstat(req, p, res->dir_attr);
}
/*
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a187200..c538c61 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -206,14 +206,14 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
-extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
-extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
+extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
+extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
@@ -286,7 +286,7 @@ extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
-extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
+extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index f071d12..3c2a172 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -115,6 +115,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
char *page, char *page2,
const struct nfs4_fs_location *location)
{
+ const size_t addr_bufsize = sizeof(struct sockaddr_storage);
struct vfsmount *mnt = ERR_PTR(-ENOENT);
char *mnt_path;
unsigned int maxbuflen;
@@ -126,9 +127,12 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
mountdata->mnt_path = mnt_path;
maxbuflen = mnt_path - 1 - page2;
+ mountdata->addr = kmalloc(addr_bufsize, GFP_KERNEL);
+ if (mountdata->addr == NULL)
+ return ERR_PTR(-ENOMEM);
+
for (s = 0; s < location->nservers; s++) {
const struct nfs4_string *buf = &location->servers[s];
- struct sockaddr_storage addr;
if (buf->len <= 0 || buf->len >= maxbuflen)
continue;
@@ -137,11 +141,10 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
continue;
mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
- (struct sockaddr *)&addr, sizeof(addr));
+ mountdata->addr, addr_bufsize);
if (mountdata->addrlen == 0)
continue;
- mountdata->addr = (struct sockaddr *)&addr;
rpc_set_port(mountdata->addr, NFS_PORT);
memcpy(page2, buf->data, buf->len);
@@ -156,6 +159,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
if (!IS_ERR(mnt))
break;
}
+ kfree(mountdata->addr);
return mnt;
}
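Before this hunk, mountdata->addr was left pointing at a struct sockaddr_storage that lived on the stack inside the loop, and each iteration re-carved those ~128 bytes of stack as well. The rewrite allocates one buffer for the whole loop and frees it at the end. The allocation idiom, reduced to a sketch with hypothetical names:

    #include <linux/slab.h>
    #include <linux/socket.h>

    struct demo_mountdata {
        struct sockaddr *addr;   /* must outlive the code that fills it */
        size_t addrlen;
    };

    static int demo_prepare_addr(struct demo_mountdata *md)
    {
        /* sockaddr_storage is sized and aligned for any address
         * family; one heap copy serves every loop iteration. */
        md->addr = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
        if (md->addr == NULL)
            return -ENOMEM;
        return 0;   /* the caller kfree()s md->addr when finished */
    }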
@@ -221,8 +225,8 @@ out:
/*
* nfs_do_refmount - handle crossing a referral on server
+ * @mnt_parent - mountpoint of referral
* @dentry - dentry of referral
- * @nd - nameidata info
*
*/
struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentry *dentry)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6380670..70015dd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -70,6 +70,9 @@ static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinf
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
+static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_fattr *fattr, struct iattr *sattr,
+ struct nfs4_state *state);
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
@@ -714,17 +717,18 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
- const struct iattr *attrs)
+ const struct iattr *attrs,
+ gfp_t gfp_mask)
{
struct dentry *parent = dget_parent(path->dentry);
struct inode *dir = parent->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *p;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
goto err;
- p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+ p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
if (p->o_arg.seqid == NULL)
goto err_free;
path_get(path);
@@ -1060,7 +1064,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
{
struct nfs4_opendata *opendata;
- opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL);
+ opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS);
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
@@ -1648,7 +1652,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
if (path->dentry->d_inode != NULL)
nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
status = -ENOMEM;
- opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr);
+ opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL);
if (opendata == NULL)
goto err_put_state_owner;
@@ -1659,15 +1663,24 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
if (status != 0)
goto err_opendata_put;
- if (opendata->o_arg.open_flags & O_EXCL)
- nfs4_exclusive_attrset(opendata, sattr);
-
state = nfs4_opendata_to_nfs4_state(opendata);
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+
+ if (opendata->o_arg.open_flags & O_EXCL) {
+ nfs4_exclusive_attrset(opendata, sattr);
+
+ nfs_fattr_init(opendata->o_res.f_attr);
+ status = nfs4_do_setattr(state->inode, cred,
+ opendata->o_res.f_attr, sattr,
+ state);
+ if (status == 0)
+ nfs_setattr_update_inode(state->inode, sattr);
+ nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
+ }
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
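Context for the O_EXCL block above: in an exclusive create the server records the create verifier rather than the requested attributes (commonly by overloading atime/mtime), so the client has to follow up with an explicit SETATTR. Moving that fixup here, after the nfs4_state exists, is what lets nfs4_proc_create shed its own copy further down. The flow, annotated as a condensed restatement rather than new code:

    if (opendata->o_arg.open_flags & O_EXCL) {
        /* strip the attributes the verifier clobbered */
        nfs4_exclusive_attrset(opendata, sattr);

        /* apply them for real against the fresh open state... */
        nfs_fattr_init(opendata->o_res.f_attr);
        status = nfs4_do_setattr(state->inode, cred,
                                 opendata->o_res.f_attr, sattr, state);
        if (status == 0)
            nfs_setattr_update_inode(state->inode, sattr);
        /* ...and fold the server's view back into the inode */
        nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
    }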
@@ -1914,7 +1927,7 @@ static const struct rpc_call_ops nfs4_close_ops = {
*
* NOTE: Caller must be holding the sp->so_owner semaphore!
*/
-int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
+int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_closedata *calldata;
@@ -1933,7 +1946,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
};
int status = -ENOMEM;
- calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
+ calldata = kzalloc(sizeof(*calldata), gfp_mask);
if (calldata == NULL)
goto out;
calldata->inode = state->inode;
@@ -1941,7 +1954,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
/* Serialization for the sequence id */
- calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
+ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
if (calldata->arg.seqid == NULL)
goto out_free_calldata;
calldata->arg.fmode = 0;
@@ -2404,14 +2417,12 @@ static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_fattr fattr;
struct nfs4_accessargs args = {
.fh = NFS_FH(inode),
.bitmask = server->attr_bitmask,
};
struct nfs4_accessres res = {
.server = server,
- .fattr = &fattr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
@@ -2438,7 +2449,11 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_EXECUTE;
}
- nfs_fattr_init(&fattr);
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ return -ENOMEM;
+
status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (!status) {
entry->mask = 0;
@@ -2448,8 +2463,9 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
entry->mask |= MAY_WRITE;
if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
entry->mask |= MAY_EXEC;
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, res.fattr);
}
+ nfs_free_fattr(res.fattr);
return status;
}
@@ -2562,13 +2578,6 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
}
d_add(dentry, igrab(state->inode));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- if (flags & O_EXCL) {
- struct nfs_fattr fattr;
- status = nfs4_do_setattr(state->inode, cred, &fattr, sattr, state);
- if (status == 0)
- nfs_setattr_update_inode(state->inode, sattr);
- nfs_post_op_update_inode(state->inode, &fattr);
- }
if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0)
status = nfs4_intent_set_file(nd, &path, state, fmode);
else
@@ -2596,14 +2605,19 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
.rpc_argp = &args,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
+
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
- nfs_fattr_init(&res.dir_attr);
status = nfs4_call_sync(server, &msg, &args, &res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
- nfs_post_op_update_inode(dir, &res.dir_attr);
+ nfs_post_op_update_inode(dir, res.dir_attr);
}
+ nfs_free_fattr(res.dir_attr);
+out:
return status;
}
@@ -2638,7 +2652,7 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
- nfs_post_op_update_inode(dir, &res->dir_attr);
+ nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
@@ -2653,29 +2667,31 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
.new_name = new_name,
.bitmask = server->attr_bitmask,
};
- struct nfs_fattr old_fattr, new_fattr;
struct nfs4_rename_res res = {
.server = server,
- .old_fattr = &old_fattr,
- .new_fattr = &new_fattr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
- nfs_fattr_init(res.old_fattr);
- nfs_fattr_init(res.new_fattr);
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ res.old_fattr = nfs_alloc_fattr();
+ res.new_fattr = nfs_alloc_fattr();
+ if (res.old_fattr == NULL || res.new_fattr == NULL)
+ goto out;
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
nfs_post_op_update_inode(new_dir, res.new_fattr);
}
+out:
+ nfs_free_fattr(res.new_fattr);
+ nfs_free_fattr(res.old_fattr);
return status;
}
@@ -2702,28 +2718,30 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
.name = name,
.bitmask = server->attr_bitmask,
};
- struct nfs_fattr fattr, dir_attr;
struct nfs4_link_res res = {
.server = server,
- .fattr = &fattr,
- .dir_attr = &dir_attr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
+
+ res.fattr = nfs_alloc_fattr();
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.fattr == NULL || res.dir_attr == NULL)
+ goto out;
- nfs_fattr_init(res.fattr);
- nfs_fattr_init(res.dir_attr);
status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
nfs_post_op_update_inode(inode, res.fattr);
}
-
+out:
+ nfs_free_fattr(res.dir_attr);
+ nfs_free_fattr(res.fattr);
return status;
}
@@ -3146,23 +3164,31 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
}
+struct nfs4_renewdata {
+ struct nfs_client *client;
+ unsigned long timestamp;
+};
+
/*
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
* standalone procedure for queueing an asynchronous RENEW.
*/
-static void nfs4_renew_release(void *data)
+static void nfs4_renew_release(void *calldata)
{
- struct nfs_client *clp = data;
+ struct nfs4_renewdata *data = calldata;
+ struct nfs_client *clp = data->client;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
+ kfree(data);
}
-static void nfs4_renew_done(struct rpc_task *task, void *data)
+static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
- struct nfs_client *clp = data;
- unsigned long timestamp = task->tk_start;
+ struct nfs4_renewdata *data = calldata;
+ struct nfs_client *clp = data->client;
+ unsigned long timestamp = data->timestamp;
if (task->tk_status < 0) {
/* Unless we're shutting down, schedule state recovery! */
@@ -3188,11 +3214,17 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
.rpc_argp = clp,
.rpc_cred = cred,
};
+ struct nfs4_renewdata *data;
if (!atomic_inc_not_zero(&clp->cl_count))
return -EIO;
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ data->client = clp;
+ data->timestamp = jiffies;
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
- &nfs4_renew_ops, clp);
+ &nfs4_renew_ops, data);
}
int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
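The async RENEW now owns a small heap-allocated calldata instead of passing the bare nfs_client: it carries the client plus a jiffies timestamp sampled before the RPC is queued, which gives the done callback a safe lower bound on when the lease was last renewed, and the release callback is the single point that frees it. The ownership pattern, sketched with stand-in names:

    #include <linux/jiffies.h>
    #include <linux/slab.h>

    struct demo_client;             /* stands in for struct nfs_client */

    struct demo_renewdata {
        struct demo_client *client;
        unsigned long timestamp;    /* sampled at queue time */
    };

    /* done callback: consume the timestamp, never free here */
    static void demo_renew_done(void *calldata)
    {
        struct demo_renewdata *data = calldata;
        /* ... update lease state using data->timestamp ... */
        (void)data;
    }

    /* release callback: the one and only owner of the memory */
    static void demo_renew_release(void *calldata)
    {
        kfree(calldata);
    }

    static int demo_async_renew(struct demo_client *clp)
    {
        struct demo_renewdata *data = kmalloc(sizeof(*data), GFP_KERNEL);

        if (data == NULL)
            return -ENOMEM;
        data->client = clp;
        data->timestamp = jiffies;
        /* hand data to the async machinery, which invokes
         * demo_renew_done() and then demo_renew_release() once */
        return 0;
    }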
@@ -3494,7 +3526,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
return _nfs4_async_handle_error(task, server, server->nfs_client, state);
}
-int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
+int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
+ unsigned short port, struct rpc_cred *cred,
+ struct nfs4_setclientid_res *res)
{
nfs4_verifier sc_verifier;
struct nfs4_setclientid setclientid = {
@@ -3504,7 +3538,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
.rpc_argp = &setclientid,
- .rpc_resp = clp,
+ .rpc_resp = res,
.rpc_cred = cred,
};
__be32 *p;
@@ -3547,12 +3581,14 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
return status;
}
-static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
+static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp,
+ struct nfs4_setclientid_res *arg,
+ struct rpc_cred *cred)
{
struct nfs_fsinfo fsinfo;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
- .rpc_argp = clp,
+ .rpc_argp = arg,
.rpc_resp = &fsinfo,
.rpc_cred = cred,
};
@@ -3570,12 +3606,14 @@ static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cre
return status;
}
-int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
+ struct nfs4_setclientid_res *arg,
+ struct rpc_cred *cred)
{
long timeout = 0;
int err;
do {
- err = _nfs4_proc_setclientid_confirm(clp, cred);
+ err = _nfs4_proc_setclientid_confirm(clp, arg, cred);
switch (err) {
case 0:
return err;
@@ -3667,7 +3705,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
};
int status = 0;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
data->args.fhandle = &data->fh;
@@ -3823,7 +3861,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_NOFS);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
@@ -3961,7 +3999,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
if (test_bit(NFS_DELEGATED_STATE, &state->flags))
goto out;
lsp = request->fl_u.nfs4_fl.owner;
- seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+ seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
if (seqid == NULL)
goto out;
@@ -3989,22 +4027,23 @@ struct nfs4_lockdata {
};
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
- struct nfs_open_context *ctx, struct nfs4_lock_state *lsp)
+ struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
+ gfp_t gfp_mask)
{
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
- p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid);
+ p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
if (p->arg.open_seqid == NULL)
goto out_free;
- p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+ p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
if (p->arg.lock_seqid == NULL)
goto out_free_seqid;
p->arg.lock_stateid = &lsp->ls_stateid;
@@ -4158,7 +4197,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
dprintk("%s: begin!\n", __func__);
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
- fl->fl_u.nfs4_fl.owner);
+ fl->fl_u.nfs4_fl.owner,
+ recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
if (data == NULL)
return -ENOMEM;
if (IS_SETLKW(cmd))
@@ -4647,7 +4687,7 @@ static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
if (max_reqs != tbl->max_slots) {
ret = -ENOMEM;
new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
- GFP_KERNEL);
+ GFP_NOFS);
if (!new)
goto out;
ret = 0;
@@ -4712,7 +4752,7 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
- slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
+ slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
if (!slot)
goto out;
ret = 0;
@@ -4761,7 +4801,7 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
- session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL);
+ session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
if (!session)
return NULL;
@@ -5105,8 +5145,8 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
if (!atomic_inc_not_zero(&clp->cl_count))
return -EIO;
- args = kzalloc(sizeof(*args), GFP_KERNEL);
- res = kzalloc(sizeof(*res), GFP_KERNEL);
+ args = kzalloc(sizeof(*args), GFP_NOFS);
+ res = kzalloc(sizeof(*res), GFP_NOFS);
if (!args || !res) {
kfree(args);
kfree(res);
@@ -5207,7 +5247,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
int status = -ENOMEM;
dprintk("--> %s\n", __func__);
- calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
+ calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL)
goto out;
calldata->clp = clp;
@@ -5218,9 +5258,12 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
+ if (IS_ERR(task)) {
status = PTR_ERR(task);
+ goto out;
+ }
rpc_put_task(task);
+ return 0;
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
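The reclaim-complete fix above is standard ERR_PTR discipline around rpc_run_task(): an error-encoded pointer is not a task and must never reach rpc_put_task(), while the success path drops the submitter's reference and reports 0, since the task completes asynchronously. As a sketch:

    #include <linux/err.h>
    #include <linux/sunrpc/clnt.h>

    static int demo_fire_and_forget(const struct rpc_task_setup *setup)
    {
        struct rpc_task *task = rpc_run_task(setup);

        if (IS_ERR(task))
            return PTR_ERR(task);   /* no task was created: nothing to put */
        rpc_put_task(task);         /* drop our ref; rpciod runs the task */
        return 0;
    }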
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6c5ed51..34acf59 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -62,6 +62,7 @@ static LIST_HEAD(nfs4_clientid_list);
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
+ struct nfs4_setclientid_res clid;
unsigned short port;
int status;
@@ -69,11 +70,15 @@ int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
if (clp->cl_addr.ss_family == AF_INET6)
port = nfs_callback_tcpport6;
- status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred);
- if (status == 0)
- status = nfs4_proc_setclientid_confirm(clp, cred);
- if (status == 0)
- nfs4_schedule_state_renewal(clp);
+ status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
+ if (status != 0)
+ goto out;
+ status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
+ if (status != 0)
+ goto out;
+ clp->cl_clientid = clid.clientid;
+ nfs4_schedule_state_renewal(clp);
+out:
return status;
}
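nfs4_init_clientid() now threads an explicit struct nfs4_setclientid_res through both halves of the handshake and only commits the clientid to the nfs_client once SETCLIENTID_CONFIRM has succeeded, instead of letting the XDR layer scribble into shared client state early. Schematically, with hypothetical names:

    #include <linux/types.h>

    struct demo_client { u64 cl_clientid; };
    struct demo_setclientid_res { u64 clientid; /* + confirm verifier */ };

    int demo_setclientid(struct demo_client *clp,
                         struct demo_setclientid_res *res);
    int demo_setclientid_confirm(struct demo_client *clp,
                                 struct demo_setclientid_res *res);

    static int demo_establish_clientid(struct demo_client *clp)
    {
        struct demo_setclientid_res clid;
        int status;

        status = demo_setclientid(clp, &clid);          /* server proposes */
        if (status != 0)
            return status;
        status = demo_setclientid_confirm(clp, &clid);  /* client echoes */
        if (status != 0)
            return status;
        clp->cl_clientid = clid.clientid;   /* commit only once confirmed */
        return 0;
    }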
@@ -361,7 +366,7 @@ nfs4_alloc_state_owner(void)
{
struct nfs4_state_owner *sp;
- sp = kzalloc(sizeof(*sp),GFP_KERNEL);
+ sp = kzalloc(sizeof(*sp),GFP_NOFS);
if (!sp)
return NULL;
spin_lock_init(&sp->so_lock);
@@ -435,7 +440,7 @@ nfs4_alloc_open_state(void)
{
struct nfs4_state *state;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_NOFS);
if (!state)
return NULL;
atomic_set(&state->count, 1);
@@ -537,7 +542,8 @@ void nfs4_put_open_state(struct nfs4_state *state)
/*
* Close the current file.
*/
-static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
+static void __nfs4_close(struct path *path, struct nfs4_state *state,
+ fmode_t fmode, gfp_t gfp_mask, int wait)
{
struct nfs4_state_owner *owner = state->owner;
int call_close = 0;
@@ -578,17 +584,17 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fm
nfs4_put_open_state(state);
nfs4_put_state_owner(owner);
} else
- nfs4_do_close(path, state, wait);
+ nfs4_do_close(path, state, gfp_mask, wait);
}
void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
- __nfs4_close(path, state, fmode, 0);
+ __nfs4_close(path, state, fmode, GFP_NOFS, 0);
}
void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
- __nfs4_close(path, state, fmode, 1);
+ __nfs4_close(path, state, fmode, GFP_KERNEL, 1);
}
/*
@@ -618,7 +624,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
struct nfs4_lock_state *lsp;
struct nfs_client *clp = state->owner->so_client;
- lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
+ lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
if (lsp == NULL)
return NULL;
rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
@@ -754,11 +760,11 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
nfs4_put_lock_state(lsp);
}
-struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
struct nfs_seqid *new;
- new = kmalloc(sizeof(*new), GFP_KERNEL);
+ new = kmalloc(sizeof(*new), gfp_mask);
if (new != NULL) {
new->sequence = counter;
INIT_LIST_HEAD(&new->list);
@@ -1347,7 +1353,7 @@ static int nfs4_recall_slot(struct nfs_client *clp)
nfs4_begin_drain_session(clp);
new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
- GFP_KERNEL);
+ GFP_NOFS);
if (!new)
return -ENOMEM;
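
nfs4state.c applies the same rule, but also threads gfp_t through helpers shared by both contexts: nfs_alloc_seqid() and __nfs4_close() now take the mask from the caller, so the asynchronous close path (which may run from reclaim) passes GFP_NOFS while nfs4_close_sync() keeps GFP_KERNEL. A sketch of the caller-supplied-mask pattern, again with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_seqid {
        struct list_head list;
};

/* The allocator no longer hard-codes a mask ... */
static struct demo_seqid *demo_alloc_seqid(gfp_t gfp_mask)
{
        struct demo_seqid *new = kmalloc(sizeof(*new), gfp_mask);

        if (new != NULL)
                INIT_LIST_HEAD(&new->list);
        return new;
}

/* ... so each call site states its own context: */
static struct demo_seqid *demo_close_async(void)
{
        return demo_alloc_seqid(GFP_NOFS);      /* may run from reclaim */
}

static struct demo_seqid *demo_close_sync(void)
{
        return demo_alloc_seqid(GFP_KERNEL);    /* plain syscall path */
}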
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 38f3b58..6bdef28 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1504,14 +1504,14 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
hdr->replen += decode_setclientid_maxsz;
}
-static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr)
+static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE);
*p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM);
- p = xdr_encode_hyper(p, client_state->cl_clientid);
- xdr_encode_opaque_fixed(p, client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ p = xdr_encode_hyper(p, arg->clientid);
+ xdr_encode_opaque_fixed(p, arg->confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
hdr->replen += decode_setclientid_confirm_maxsz;
}
@@ -2324,7 +2324,7 @@ static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4
/*
* a SETCLIENTID_CONFIRM request
*/
-static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
+static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
@@ -2334,7 +2334,7 @@ static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, req, &hdr);
- encode_setclientid_confirm(&xdr, clp, &hdr);
+ encode_setclientid_confirm(&xdr, arg, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_fsinfo(&xdr, lease_bitmap, &hdr);
encode_nops(&hdr);
@@ -4397,7 +4397,7 @@ out_overflow:
return -EIO;
}
-static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
+static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res)
{
__be32 *p;
uint32_t opnum;
@@ -4417,8 +4417,8 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
if (unlikely(!p))
goto out_overflow;
- p = xdr_decode_hyper(p, &clp->cl_clientid);
- memcpy(clp->cl_confirm.data, p, NFS4_VERIFIER_SIZE);
+ p = xdr_decode_hyper(p, &res->clientid);
+ memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE);
} else if (nfserr == NFSERR_CLID_INUSE) {
uint32_t len;
@@ -4815,7 +4815,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem
goto out;
if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
goto out;
- decode_getfattr(&xdr, &res->dir_attr, res->server,
+ decode_getfattr(&xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5498,7 +5498,7 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
* Decode SETCLIENTID response
*/
static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
- struct nfs_client *clp)
+ struct nfs4_setclientid_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -5507,7 +5507,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
- status = decode_setclientid(&xdr, clp);
+ status = decode_setclientid(&xdr, res);
return status;
}
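
The nfs4xdr.c hunks complete the SETCLIENTID refactor begun in nfs4state.c above: the XDR routines no longer read or write struct nfs_client directly but a dedicated struct nfs4_setclientid_res carrying the (clientid, confirm verifier) pair, and nfs4_init_clientid() copies the clientid into clp->cl_clientid only after SETCLIENTID_CONFIRM succeeds. A failed confirm therefore can no longer leave a half-established clientid behind. The flow, sketched with hypothetical demo_* stand-ins:

#include <linux/types.h>

struct demo_clid_res {
        u64 clientid;
        u8  confirm[8];         /* NFS4_VERIFIER_SIZE in the real code */
};

struct demo_client {
        u64 cl_clientid;
};

/* Stubs standing in for the two RPC calls. */
static int demo_setclientid(struct demo_client *clp, struct demo_clid_res *res)
{
        res->clientid = 1;      /* stub: pretend the server answered */
        return 0;
}

static int demo_setclientid_confirm(struct demo_client *clp,
                                    const struct demo_clid_res *res)
{
        return 0;               /* stub */
}

static int demo_init_clientid(struct demo_client *clp)
{
        struct demo_clid_res clid;      /* private until both RPCs succeed */
        int status;

        status = demo_setclientid(clp, &clid);
        if (status != 0)
                return status;
        status = demo_setclientid_confirm(clp, &clid);
        if (status != 0)
                return status;
        clp->cl_clientid = clid.clientid;       /* publish on success only */
        return 0;
}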
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 8c55b27..6bd19d8 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -488,7 +488,6 @@ static int __init root_nfs_ports(void)
*/
static int __init root_nfs_get_handle(void)
{
- struct nfs_fh fh;
struct sockaddr_in sin;
unsigned int auth_flav_len = 0;
struct nfs_mount_request request = {
@@ -499,21 +498,24 @@ static int __init root_nfs_get_handle(void)
NFS_MNT3_VERSION : NFS_MNT_VERSION,
.protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
- .fh = &fh,
.auth_flav_len = &auth_flav_len,
};
- int status;
+ int status = -ENOMEM;
+ request.fh = nfs_alloc_fhandle();
+ if (!request.fh)
+ goto out;
set_sockaddr(&sin, servaddr, htons(mount_port));
status = nfs_mount(&request);
if (status < 0)
printk(KERN_ERR "Root-NFS: Server returned error %d "
"while mounting %s\n", status, nfs_export_path);
else {
- nfs_data.root.size = fh.size;
- memcpy(nfs_data.root.data, fh.data, fh.size);
+ nfs_data.root.size = request.fh->size;
+ memcpy(&nfs_data.root.data, request.fh->data, request.fh->size);
}
-
+ nfs_free_fhandle(request.fh);
+out:
return status;
}
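
root_nfs_get_handle() is one of many sites in this series that stop keeping a struct nfs_fh on the stack: the NFSv4 handle payload alone is 128 bytes, which adds up quickly in deep mount and lookup call chains. nfs_alloc_fhandle() and nfs_free_fhandle() are thin allocation wrappers, roughly like this sketch (not the kernel's own definitions):

#include <linux/slab.h>

#define DEMO_FHSIZE 128         /* an NFS4_FHSIZE-sized payload */

struct demo_fh {
        unsigned short size;
        unsigned char data[DEMO_FHSIZE];
};

/* Zeroed, so 'size == 0' is a well-defined "empty handle" state. */
static struct demo_fh *demo_alloc_fhandle(void)
{
        return kzalloc(sizeof(struct demo_fh), GFP_KERNEL);
}

static void demo_free_fhandle(const struct demo_fh *fh)
{
        kfree(fh);      /* kfree(NULL) is a no-op, so callers may
                         * free unconditionally on error paths */
}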
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 29d9d36..a3654e5 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -60,16 +60,10 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
{
struct nfs_page *req;
- for (;;) {
- /* try to allocate the request struct */
- req = nfs_page_alloc();
- if (req != NULL)
- break;
-
- if (fatal_signal_pending(current))
- return ERR_PTR(-ERESTARTSYS);
- yield();
- }
+ /* try to allocate the request struct */
+ req = nfs_page_alloc();
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
/* Initialize the request struct. Initially, we assume a
* long write-back delay. This will be adjusted in
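
nfs_create_request() previously busy-looped (allocate, check for a fatal signal, yield(), retry) until the allocation succeeded. Besides burning CPU, retrying inside the allocator hides memory pressure from callers. It now fails fast, and every caller must treat the return value as an ERR_PTR. A sketch of the new caller-side contract, with illustrative demo_* names:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_req {
        int placeholder;
};

static struct demo_req *demo_create_request(void)
{
        struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (req == NULL)
                return ERR_PTR(-ENOMEM);        /* no retry loop here */
        return req;
}

static int demo_caller(void)
{
        struct demo_req *req = demo_create_request();

        if (IS_ERR(req))
                return PTR_ERR(req);    /* propagate -ENOMEM upward */
        kfree(req);
        return 0;
}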
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 0288be8..611bec2 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -224,35 +224,60 @@ static int nfs_proc_readlink(struct inode *inode, struct page *page,
return status;
}
+struct nfs_createdata {
+ struct nfs_createargs arg;
+ struct nfs_diropok res;
+ struct nfs_fh fhandle;
+ struct nfs_fattr fattr;
+};
+
+static struct nfs_createdata *nfs_alloc_createdata(struct inode *dir,
+ struct dentry *dentry, struct iattr *sattr)
+{
+ struct nfs_createdata *data;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (data != NULL) {
+ data->arg.fh = NFS_FH(dir);
+ data->arg.name = dentry->d_name.name;
+ data->arg.len = dentry->d_name.len;
+ data->arg.sattr = sattr;
+ nfs_fattr_init(&data->fattr);
+ data->fhandle.size = 0;
+ data->res.fh = &data->fhandle;
+ data->res.fattr = &data->fattr;
+ }
+ return data;
+};
+
+static void nfs_free_createdata(const struct nfs_createdata *data)
+{
+ kfree(data);
+}
+
static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nameidata *nd)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
- struct nfs_createargs arg = {
- .fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len,
- .sattr = sattr
- };
- struct nfs_diropok res = {
- .fh = &fhandle,
- .fattr = &fattr
- };
+ struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_CREATE],
- .rpc_argp = &arg,
- .rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
- nfs_fattr_init(&fattr);
dprintk("NFS call create %s\n", dentry->d_name.name);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
if (status == 0)
- status = nfs_instantiate(dentry, &fhandle, &fattr);
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
dprintk("NFS reply create: %d\n", status);
return status;
}
@@ -264,24 +289,12 @@ static int
nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
dev_t rdev)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
- struct nfs_createargs arg = {
- .fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len,
- .sattr = sattr
- };
- struct nfs_diropok res = {
- .fh = &fhandle,
- .fattr = &fattr
- };
+ struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_CREATE],
- .rpc_argp = &arg,
- .rpc_resp = &res,
};
- int status, mode;
+ umode_t mode;
+ int status = -ENOMEM;
dprintk("NFS call mknod %s\n", dentry->d_name.name);
@@ -294,17 +307,24 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */
}
- nfs_fattr_init(&fattr);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
if (status == -EINVAL && S_ISFIFO(mode)) {
sattr->ia_mode = mode;
- nfs_fattr_init(&fattr);
+ nfs_fattr_init(data->res.fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
}
if (status == 0)
- status = nfs_instantiate(dentry, &fhandle, &fattr);
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
dprintk("NFS reply mknod: %d\n", status);
return status;
}
@@ -398,8 +418,8 @@ static int
nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
unsigned int len, struct iattr *sattr)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
+ struct nfs_fh *fh;
+ struct nfs_fattr *fattr;
struct nfs_symlinkargs arg = {
.fromfh = NFS_FH(dir),
.fromname = dentry->d_name.name,
@@ -412,12 +432,18 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
.rpc_proc = &nfs_procedures[NFSPROC_SYMLINK],
.rpc_argp = &arg,
};
- int status;
+ int status = -ENAMETOOLONG;
+
+ dprintk("NFS call symlink %s\n", dentry->d_name.name);
if (len > NFS2_MAXPATHLEN)
- return -ENAMETOOLONG;
+ goto out;
- dprintk("NFS call symlink %s\n", dentry->d_name.name);
+ fh = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ status = -ENOMEM;
+ if (fh == NULL || fattr == NULL)
+ goto out_free;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
@@ -427,12 +453,13 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
* filehandle size to zero indicates to nfs_instantiate that it
* should fill in the data with a LOOKUP call on the wire.
*/
- if (status == 0) {
- nfs_fattr_init(&fattr);
- fhandle.size = 0;
- status = nfs_instantiate(dentry, &fhandle, &fattr);
- }
+ if (status == 0)
+ status = nfs_instantiate(dentry, fh, fattr);
+out_free:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fh);
+out:
dprintk("NFS reply symlink: %d\n", status);
return status;
}
@@ -440,31 +466,25 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
static int
nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
- struct nfs_createargs arg = {
- .fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len,
- .sattr = sattr
- };
- struct nfs_diropok res = {
- .fh = &fhandle,
- .fattr = &fattr
- };
+ struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_MKDIR],
- .rpc_argp = &arg,
- .rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
- nfs_fattr_init(&fattr);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
if (status == 0)
- status = nfs_instantiate(dentry, &fhandle, &fattr);
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
dprintk("NFS reply mkdir: %d\n", status);
return status;
}
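
nfs_proc_create(), nfs_proc_mknod() and nfs_proc_mkdir() above all used to build their RPC argument and result structures, including a full nfs_fh and nfs_fattr, on the stack. struct nfs_createdata bundles the four pieces into a single kmalloc(), so each call carries one pointer instead. The shape of the pattern, sketched with hypothetical demo_* types:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_args { const char *name; };
struct demo_res  { int fh_size; };

struct demo_createdata {
        struct demo_args arg;   /* request half */
        struct demo_res  res;   /* reply half; in the real code its
                                 * pointers aim back into this block */
};

static int demo_create(const char *name)
{
        struct demo_createdata *data;
        int status = -ENOMEM;

        data = kmalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                goto out;
        data->arg.name = name;
        data->res.fh_size = 0;
        /* ... hand &data->arg and &data->res to the RPC layer ... */
        status = 0;
        kfree(data);
out:
        return status;
}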
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index db9b360..6e2b06e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -40,7 +40,7 @@ static mempool_t *nfs_rdata_mempool;
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
- struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
+ struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_KERNEL);
if (p) {
memset(p, 0, sizeof(*p));
@@ -50,7 +50,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
- p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
+ p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
if (!p->pagevec) {
mempool_free(p, nfs_rdata_mempool);
p = NULL;
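
The read side moves in the opposite direction from the write side: nfs_readdata_alloc() is never called from memory reclaim (reads are driven by syscalls and readahead, not by writeback), so it can use GFP_KERNEL and let the allocator write out other pages to satisfy it. Only the write and commit structures, which reclaim itself depends on, keep GFP_NOFS. A one-line sketch of the read-path rule:

#include <linux/mempool.h>

/*
 * Sketch: a pool used only on the read path may block in reclaim,
 * so the stronger GFP_KERNEL mask is safe and preferable here.
 */
static void *demo_readdata_alloc(mempool_t *pool)
{
        return mempool_alloc(pool, GFP_KERNEL);
}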
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index e016372..2f8b1157 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -141,7 +141,6 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_resvport, "resvport" },
{ Opt_noresvport, "noresvport" },
{ Opt_fscache, "fsc" },
- { Opt_fscache_uniq, "fsc=%s" },
{ Opt_nofscache, "nofsc" },
{ Opt_port, "port=%s" },
@@ -171,6 +170,7 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_mountaddr, "mountaddr=%s" },
{ Opt_lookupcache, "lookupcache=%s" },
+ { Opt_fscache_uniq, "fsc=%s" },
{ Opt_err, NULL }
};
@@ -423,15 +423,19 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
unsigned char blockbits;
unsigned long blockres;
struct nfs_fh *fh = NFS_FH(dentry->d_inode);
- struct nfs_fattr fattr;
- struct nfs_fsstat res = {
- .fattr = &fattr,
- };
- int error;
+ struct nfs_fsstat res;
+ int error = -ENOMEM;
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ goto out_err;
error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
+
+ nfs_free_fattr(res.fattr);
if (error < 0)
goto out_err;
+
buf->f_type = NFS_SUPER_MAGIC;
/*
@@ -1046,14 +1050,6 @@ static int nfs_parse_mount_options(char *raw,
kfree(mnt->fscache_uniq);
mnt->fscache_uniq = NULL;
break;
- case Opt_fscache_uniq:
- string = match_strdup(args);
- if (!string)
- goto out_nomem;
- kfree(mnt->fscache_uniq);
- mnt->fscache_uniq = string;
- mnt->options |= NFS_OPTION_FSCACHE;
- break;
/*
* options that take numeric values
@@ -1384,6 +1380,14 @@ static int nfs_parse_mount_options(char *raw,
return 0;
};
break;
+ case Opt_fscache_uniq:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ kfree(mnt->fscache_uniq);
+ mnt->fscache_uniq = string;
+ mnt->options |= NFS_OPTION_FSCACHE;
+ break;
/*
* Special options
@@ -2172,7 +2176,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
int error = -ENOMEM;
data = nfs_alloc_parsed_mount_data(3);
- mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
+ mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
goto out_free_fh;
@@ -2187,6 +2191,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
if (data->version == 4) {
error = nfs4_try_mount(flags, dev_name, data, mnt);
kfree(data->client_address);
+ kfree(data->nfs_server.export_path);
goto out;
}
#endif /* CONFIG_NFS_V4 */
@@ -2246,7 +2251,7 @@ out:
kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
- kfree(mntfh);
+ nfs_free_fhandle(mntfh);
kfree(data);
return error;
@@ -2555,7 +2560,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
};
int error = -ENOMEM;
- mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
+ mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
goto out_free_fh;
@@ -2613,7 +2618,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
out:
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
- kfree(mntfh);
+ nfs_free_fhandle(mntfh);
return error;
out_free:
@@ -2657,7 +2662,7 @@ static void nfs_fix_devname(const struct path *path, struct vfsmount *mnt)
devname = nfs_path(path->mnt->mnt_devname,
path->mnt->mnt_root, path->dentry,
page, PAGE_SIZE);
- if (devname == NULL)
+ if (IS_ERR(devname))
goto out_freepage;
tmp = kstrdup(devname, GFP_KERNEL);
if (tmp == NULL)
@@ -2668,41 +2673,120 @@ out_freepage:
free_page((unsigned long)page);
}
+struct nfs_referral_count {
+ struct list_head list;
+ const struct task_struct *task;
+ unsigned int referral_count;
+};
+
+static LIST_HEAD(nfs_referral_count_list);
+static DEFINE_SPINLOCK(nfs_referral_count_list_lock);
+
+static struct nfs_referral_count *nfs_find_referral_count(void)
+{
+ struct nfs_referral_count *p;
+
+ list_for_each_entry(p, &nfs_referral_count_list, list) {
+ if (p->task == current)
+ return p;
+ }
+ return NULL;
+}
+
+#define NFS_MAX_NESTED_REFERRALS 2
+
+static int nfs_referral_loop_protect(void)
+{
+ struct nfs_referral_count *p, *new;
+ int ret = -ENOMEM;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto out;
+ new->task = current;
+ new->referral_count = 1;
+
+ ret = 0;
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ if (p != NULL) {
+ if (p->referral_count >= NFS_MAX_NESTED_REFERRALS)
+ ret = -ELOOP;
+ else
+ p->referral_count++;
+ } else {
+ list_add(&new->list, &nfs_referral_count_list);
+ new = NULL;
+ }
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(new);
+out:
+ return ret;
+}
+
+static void nfs_referral_loop_unprotect(void)
+{
+ struct nfs_referral_count *p;
+
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ p->referral_count--;
+ if (p->referral_count == 0)
+ list_del(&p->list);
+ else
+ p = NULL;
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(p);
+}
+
static int nfs_follow_remote_path(struct vfsmount *root_mnt,
const char *export_path, struct vfsmount *mnt_target)
{
+ struct nameidata *nd = NULL;
struct mnt_namespace *ns_private;
- struct nameidata nd;
struct super_block *s;
int ret;
+ nd = kmalloc(sizeof(*nd), GFP_KERNEL);
+ if (nd == NULL)
+ return -ENOMEM;
+
ns_private = create_mnt_ns(root_mnt);
ret = PTR_ERR(ns_private);
if (IS_ERR(ns_private))
goto out_mntput;
+ ret = nfs_referral_loop_protect();
+ if (ret != 0)
+ goto out_put_mnt_ns;
+
ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
- export_path, LOOKUP_FOLLOW, &nd);
+ export_path, LOOKUP_FOLLOW, nd);
+ nfs_referral_loop_unprotect();
put_mnt_ns(ns_private);
if (ret != 0)
goto out_err;
- s = nd.path.mnt->mnt_sb;
+ s = nd->path.mnt->mnt_sb;
atomic_inc(&s->s_active);
mnt_target->mnt_sb = s;
- mnt_target->mnt_root = dget(nd.path.dentry);
+ mnt_target->mnt_root = dget(nd->path.dentry);
/* Correct the device pathname */
- nfs_fix_devname(&nd.path, mnt_target);
+ nfs_fix_devname(&nd->path, mnt_target);
- path_put(&nd.path);
+ path_put(&nd->path);
+ kfree(nd);
down_write(&s->s_umount);
return 0;
+out_put_mnt_ns:
+ put_mnt_ns(ns_private);
out_mntput:
mntput(root_mnt);
out_err:
+ kfree(nd);
return ret;
}
@@ -2873,17 +2957,21 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
struct super_block *s;
struct nfs_server *server;
struct dentry *mntroot;
- struct nfs_fh mntfh;
+ struct nfs_fh *mntfh;
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
struct nfs_sb_mountdata sb_mntdata = {
.mntflags = flags,
};
- int error;
+ int error = -ENOMEM;
dprintk("--> nfs4_referral_get_sb()\n");
+ mntfh = nfs_alloc_fhandle();
+ if (mntfh == NULL)
+ goto out_err_nofh;
+
/* create a new volume representation */
- server = nfs4_create_referral_server(data, &mntfh);
+ server = nfs4_create_referral_server(data, mntfh);
if (IS_ERR(server)) {
error = PTR_ERR(server);
goto out_err_noserver;
@@ -2915,7 +3003,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
nfs_fscache_get_super_cookie(s, NULL, data);
}
- mntroot = nfs4_get_root(s, &mntfh);
+ mntroot = nfs4_get_root(s, mntfh);
if (IS_ERR(mntroot)) {
error = PTR_ERR(mntroot);
goto error_splat_super;
@@ -2932,12 +3020,15 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
security_sb_clone_mnt_opts(data->sb, s);
+ nfs_free_fhandle(mntfh);
dprintk("<-- nfs4_referral_get_sb() = 0\n");
return 0;
out_err_nosb:
nfs_free_server(server);
out_err_noserver:
+ nfs_free_fhandle(mntfh);
+out_err_nofh:
dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
return error;
@@ -2946,6 +3037,7 @@ error_splat_super:
bdi_unregister(&server->backing_dev_info);
error_splat_bdi:
deactivate_locked_super(s);
+ nfs_free_fhandle(mntfh);
dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
return error;
}
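
nfs_follow_remote_path() can recurse: resolving a referral mounts a server whose export may itself contain a referral. The new nfs_referral_loop_protect()/nfs_referral_loop_unprotect() pair keeps a per-task nesting count on a small global list and fails with -ELOOP past NFS_MAX_NESTED_REFERRALS (2). Usage follows the classic enter/exit bracket; a sketch, where the lookup in the middle is what may trigger the nested referral:

/*
 * Sketch of bracketing a possibly-recursive operation with the
 * protect/unprotect pair defined above.
 */
static int demo_follow_referral(void)
{
        int ret;

        ret = nfs_referral_loop_protect();      /* -ENOMEM or -ELOOP */
        if (ret != 0)
                return ret;

        /* ... vfs_path_lookup() here may re-enter this function ... */

        nfs_referral_loop_unprotect();
        return 0;
}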
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 6da3d3f..a2242af 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -23,6 +23,7 @@ struct nfs_unlinkdata {
struct nfs_removeres res;
struct inode *dir;
struct rpc_cred *cred;
+ struct nfs_fattr dir_attr;
};
/**
@@ -169,7 +170,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
}
nfs_sb_active(dir->i_sb);
data->args.fh = NFS_FH(dir);
- nfs_fattr_init(&data->res.dir_attr);
+ nfs_fattr_init(data->res.dir_attr);
NFS_PROTO(dir)->unlink_setup(&msg, dir);
@@ -259,6 +260,7 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
goto out_free;
}
data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+ data->res.dir_attr = &data->dir_attr;
status = -EBUSY;
spin_lock(&dentry->d_lock);
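
The unlink.c change matches the earlier hunk that turned nfs_removeres.dir_attr into a pointer: each nfs_unlinkdata now embeds its own struct nfs_fattr and points res.dir_attr at it, so the attribute buffer lives with the already heap-allocated request instead of on a caller's stack. Sketched with hypothetical demo_* types:

struct demo_fattr { int valid; };

struct demo_removeres {
        struct demo_fattr *dir_attr;    /* now a pointer */
};

struct demo_unlinkdata {
        struct demo_removeres res;
        struct demo_fattr dir_attr;     /* storage travels with the
                                         * heap-allocated request */
};

static void demo_init_unlinkdata(struct demo_unlinkdata *data)
{
        data->res.dir_attr = &data->dir_attr;
}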
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index de38d63..3aea3ca 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1201,6 +1201,25 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
+{
+ if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
+ return 1;
+ if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags,
+ NFS_INO_COMMIT, nfs_wait_bit_killable,
+ TASK_KILLABLE))
+ return 1;
+ return 0;
+}
+
+static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+{
+ clear_bit(NFS_INO_COMMIT, &nfsi->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
+}
+
+
static void nfs_commitdata_release(void *data)
{
struct nfs_write_data *wdata = data;
@@ -1262,8 +1281,6 @@ static int nfs_commit_rpcsetup(struct list_head *head,
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
- if (how & FLUSH_SYNC)
- rpc_wait_for_completion_task(task);
rpc_put_task(task);
return 0;
}
@@ -1294,6 +1311,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
BDI_RECLAIMABLE);
nfs_clear_page_tag_locked(req);
}
+ nfs_commit_clear_lock(NFS_I(inode));
return -ENOMEM;
}
@@ -1349,6 +1367,7 @@ static void nfs_commit_release(void *calldata)
next:
nfs_clear_page_tag_locked(req);
}
+ nfs_commit_clear_lock(NFS_I(data->inode));
nfs_commitdata_release(calldata);
}
@@ -1363,8 +1382,11 @@ static const struct rpc_call_ops nfs_commit_ops = {
static int nfs_commit_inode(struct inode *inode, int how)
{
LIST_HEAD(head);
- int res;
+ int may_wait = how & FLUSH_SYNC;
+ int res = 0;
+ if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
+ goto out;
spin_lock(&inode->i_lock);
res = nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&inode->i_lock);
@@ -1372,7 +1394,13 @@ static int nfs_commit_inode(struct inode *inode, int how)
int error = nfs_commit_list(inode, &head, how);
if (error < 0)
return error;
- }
+ if (may_wait)
+ wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
+ nfs_wait_bit_killable,
+ TASK_KILLABLE);
+ } else
+ nfs_commit_clear_lock(NFS_I(inode));
+out:
return res;
}
@@ -1444,6 +1472,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
BUG_ON(!PageLocked(page));
for (;;) {
+ wait_on_page_writeback(page);
req = nfs_page_find_request(page);
if (req == NULL)
break;
@@ -1478,30 +1507,18 @@ int nfs_wb_page(struct inode *inode, struct page *page)
.range_start = range_start,
.range_end = range_end,
};
- struct nfs_page *req;
- int need_commit;
int ret;
while(PagePrivate(page)) {
+ wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc);
if (ret < 0)
goto out_error;
}
- req = nfs_find_and_lock_request(page);
- if (!req)
- break;
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
+ ret = sync_inode(inode, &wbc);
+ if (ret < 0)
goto out_error;
- }
- need_commit = test_bit(PG_CLEAN, &req->wb_flags);
- nfs_clear_page_tag_locked(req);
- if (need_commit) {
- ret = nfs_commit_inode(inode, FLUSH_SYNC);
- if (ret < 0)
- goto out_error;
- }
}
return 0;
out_error:
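
nfs_commit_inode() is now serialized per inode by the NFS_INO_COMMIT bit: the first caller takes the bit and issues the COMMIT, a FLUSH_SYNC caller that loses the race sleeps killably on the bit instead of racing to send its own RPC, and completion clears the bit and wakes all waiters. That is also why nfs_commit_rpcsetup() drops its rpc_wait_for_completion_task() call. The generic shape of the bit lock, using the same primitives (a sketch; DEMO_COMMIT stands in for NFS_INO_COMMIT and 'flags' for the per-inode flag word):

#include <linux/bitops.h>
#include <linux/wait.h>

#define DEMO_COMMIT 0

/* Returns 1 if we own the bit and should issue the COMMIT ourselves. */
static int demo_commit_trylock(unsigned long *flags)
{
        return !test_and_set_bit(DEMO_COMMIT, flags);
}

static void demo_commit_unlock(unsigned long *flags)
{
        clear_bit(DEMO_COMMIT, flags);
        smp_mb__after_clear_bit();      /* clear visible before wakeup */
        wake_up_bit(flags, DEMO_COMMIT);
}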
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 872a5ef..c2a4f71 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -259,10 +259,9 @@ static struct cache_detail svc_expkey_cache = {
.alloc = expkey_alloc,
};
-static struct svc_expkey *
-svc_expkey_lookup(struct svc_expkey *item)
+static int
+svc_expkey_hash(struct svc_expkey *item)
{
- struct cache_head *ch;
int hash = item->ek_fsidtype;
char * cp = (char*)item->ek_fsid;
int len = key_len(item->ek_fsidtype);
@@ -270,6 +269,14 @@ svc_expkey_lookup(struct svc_expkey *item)
hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS);
hash &= EXPKEY_HASHMASK;
+ return hash;
+}
+
+static struct svc_expkey *
+svc_expkey_lookup(struct svc_expkey *item)
+{
+ struct cache_head *ch;
+ int hash = svc_expkey_hash(item);
ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
hash);
@@ -283,13 +290,7 @@ static struct svc_expkey *
svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
{
struct cache_head *ch;
- int hash = new->ek_fsidtype;
- char * cp = (char*)new->ek_fsid;
- int len = key_len(new->ek_fsidtype);
-
- hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
- hash ^= hash_ptr(new->ek_client, EXPKEY_HASHBITS);
- hash &= EXPKEY_HASHMASK;
+ int hash = svc_expkey_hash(new);
ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
&old->h, hash);
@@ -738,14 +739,22 @@ struct cache_detail svc_export_cache = {
.alloc = svc_export_alloc,
};
-static struct svc_export *
-svc_export_lookup(struct svc_export *exp)
+static int
+svc_export_hash(struct svc_export *exp)
{
- struct cache_head *ch;
int hash;
+
hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS);
hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS);
hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS);
+ return hash;
+}
+
+static struct svc_export *
+svc_export_lookup(struct svc_export *exp)
+{
+ struct cache_head *ch;
+ int hash = svc_export_hash(exp);
ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
hash);
@@ -759,10 +768,7 @@ static struct svc_export *
svc_export_update(struct svc_export *new, struct svc_export *old)
{
struct cache_head *ch;
- int hash;
- hash = hash_ptr(old->ex_client, EXPORT_HASHBITS);
- hash ^= hash_ptr(old->ex_path.dentry, EXPORT_HASHBITS);
- hash ^= hash_ptr(old->ex_path.mnt, EXPORT_HASHBITS);
+ int hash = svc_export_hash(old);
ch = sunrpc_cache_update(&svc_export_cache, &new->h,
&old->h,
@@ -1071,9 +1077,9 @@ exp_export(struct nfsctl_export *nxp)
err = 0;
finish:
kfree(new.ex_pathname);
- if (exp)
+ if (!IS_ERR_OR_NULL(exp))
exp_put(exp);
- if (fsid_key && !IS_ERR(fsid_key))
+ if (!IS_ERR_OR_NULL(fsid_key))
cache_put(&fsid_key->h, &svc_expkey_cache);
path_put(&path);
out_put_clp:
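
The export.c hunks are a pure refactor: the three-way pointer hash was open-coded identically in the lookup and update paths of both caches, so it is hoisted into svc_expkey_hash() and svc_export_hash(); exp_export()'s cleanup also collapses its separate NULL and IS_ERR tests into IS_ERR_OR_NULL(). The combining scheme itself is unchanged; a sketch with a hypothetical bit width:

#include <linux/hash.h>

#define DEMO_HASHBITS 8

static int demo_export_hash(void *client, void *dentry, void *mnt)
{
        int hash;

        hash  = hash_ptr(client, DEMO_HASHBITS);
        hash ^= hash_ptr(dentry, DEMO_HASHBITS);
        hash ^= hash_ptr(mnt, DEMO_HASHBITS);
        return hash;
}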
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7e32bd3..eb78e7e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -32,6 +32,7 @@
*/
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"
@@ -79,11 +80,6 @@ enum nfs_cb_opnum4 {
cb_sequence_dec_sz + \
op_dec_sz)
-struct nfs4_rpc_args {
- void *args_op;
- struct nfsd4_cb_sequence args_seq;
-};
-
/*
* Generic encode routines from fs/nfs/nfs4xdr.c
*/
@@ -428,13 +424,19 @@ static struct rpc_procinfo nfs4_cb_procedures[] = {
};
static struct rpc_version nfs_cb_version4 = {
+/*
+ * Note on the callback rpc program version number: despite language in rfc
+ * 5661 section 18.36.3 requiring servers to use 4 in this field, the
+ * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
+ * in practice that appears to be what implementations use. The section
+ * 18.36.3 language is expected to be fixed in an erratum.
+ */
.number = 1,
.nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
.procs = nfs4_cb_procedures
};
static struct rpc_version * nfs_cb_version[] = {
- NULL,
&nfs_cb_version4,
};
@@ -456,15 +458,14 @@ static struct rpc_program cb_program = {
static int max_cb_time(void)
{
- return max(NFSD_LEASE_TIME/10, (time_t)1) * HZ;
+ return max(nfsd4_lease/10, (time_t)1) * HZ;
}
/* Reference counting, callback cleanup, etc., all look racy as heck.
- * And why is cb_set an atomic? */
+ * And why is cl_cb_set an atomic? */
-int setup_callback_client(struct nfs4_client *clp)
+int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
{
- struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_timeout timeparms = {
.to_initval = max_cb_time(),
.to_retries = 0,
@@ -476,7 +477,7 @@ int setup_callback_client(struct nfs4_client *clp)
.timeout = &timeparms,
.program = &cb_program,
.prognumber = cb->cb_prog,
- .version = nfs_cb_version[1]->number,
+ .version = 0,
.authflavor = clp->cl_flavor,
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
.client_name = clp->cl_principal,
@@ -486,7 +487,7 @@ int setup_callback_client(struct nfs4_client *clp)
if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
return -EINVAL;
if (cb->cb_minorversion) {
- args.bc_xprt = clp->cl_cb_xprt;
+ args.bc_xprt = cb->cb_xprt;
args.protocol = XPRT_TRANSPORT_BC_TCP;
}
/* Create RPC client */
@@ -496,7 +497,7 @@ int setup_callback_client(struct nfs4_client *clp)
PTR_ERR(client));
return PTR_ERR(client);
}
- cb->cb_client = client;
+ nfsd4_set_callback_client(clp, client);
return 0;
}
@@ -514,8 +515,7 @@ static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
if (task->tk_status)
warn_no_callback_path(clp, task->tk_status);
else
- atomic_set(&clp->cl_cb_conn.cb_set, 1);
- put_nfs4_client(clp);
+ atomic_set(&clp->cl_cb_set, 1);
}
static const struct rpc_call_ops nfsd4_cb_probe_ops = {
@@ -537,7 +537,6 @@ int set_callback_cred(void)
void do_probe_callback(struct nfs4_client *clp)
{
- struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
.rpc_argp = clp,
@@ -545,34 +544,27 @@ void do_probe_callback(struct nfs4_client *clp)
};
int status;
- status = rpc_call_async(cb->cb_client, &msg,
+ status = rpc_call_async(clp->cl_cb_client, &msg,
RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
&nfsd4_cb_probe_ops, (void *)clp);
- if (status) {
+ if (status)
warn_no_callback_path(clp, status);
- put_nfs4_client(clp);
- }
}
/*
* Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
*/
-void
-nfsd4_probe_callback(struct nfs4_client *clp)
+void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
{
int status;
- BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set));
+ BUG_ON(atomic_read(&clp->cl_cb_set));
- status = setup_callback_client(clp);
+ status = setup_callback_client(clp, cb);
if (status) {
warn_no_callback_path(clp, status);
return;
}
-
- /* the task holds a reference to the nfs4_client struct */
- atomic_inc(&clp->cl_count);
-
do_probe_callback(clp);
}
@@ -658,18 +650,32 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
}
}
+
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegation *dp = calldata;
struct nfs4_client *clp = dp->dl_client;
+ struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
nfsd4_cb_done(task, calldata);
+ if (current_rpc_client == NULL) {
+ /* We're shutting down; give up. */
+ /* XXX: err, or is it ok just to fall through
+ * and rpc_restart_call? */
+ return;
+ }
+
switch (task->tk_status) {
case -EIO:
/* Network partition? */
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ atomic_set(&clp->cl_cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
+ if (current_rpc_client != task->tk_client) {
+ /* queue a callback on the new connection: */
+ nfsd4_cb_recall(dp);
+ return;
+ }
case -EBADHANDLE:
case -NFS4ERR_BAD_STATEID:
/* Race: client probably got cb_recall
@@ -677,7 +683,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
break;
default:
/* success, or error we can't handle */
- goto done;
+ return;
}
if (dp->dl_retries--) {
rpc_delay(task, 2*HZ);
@@ -685,20 +691,16 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
rpc_restart_call(task);
return;
} else {
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ atomic_set(&clp->cl_cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
}
-done:
- kfree(task->tk_msg.rpc_argp);
}
static void nfsd4_cb_recall_release(void *calldata)
{
struct nfs4_delegation *dp = calldata;
- struct nfs4_client *clp = dp->dl_client;
nfs4_put_delegation(dp);
- put_nfs4_client(clp);
}
static const struct rpc_call_ops nfsd4_cb_recall_ops = {
@@ -707,33 +709,75 @@ static const struct rpc_call_ops nfsd4_cb_recall_ops = {
.rpc_release = nfsd4_cb_recall_release,
};
+static struct workqueue_struct *callback_wq;
+
+int nfsd4_create_callback_queue(void)
+{
+ callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
+ if (!callback_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfsd4_destroy_callback_queue(void)
+{
+ destroy_workqueue(callback_wq);
+}
+
+/* must be called under the state lock */
+void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt *new)
+{
+ struct rpc_clnt *old = clp->cl_cb_client;
+
+ clp->cl_cb_client = new;
+ /*
+ * After this, any work that saw the old value of cl_cb_client will
+ * be gone:
+ */
+ flush_workqueue(callback_wq);
+ /* So we can safely shut it down: */
+ if (old)
+ rpc_shutdown_client(old);
+}
+
/*
* called with dp->dl_count inc'ed.
*/
-void
-nfsd4_cb_recall(struct nfs4_delegation *dp)
+static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_client;
- struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
- struct nfs4_rpc_args *args;
+ struct rpc_clnt *clnt = clp->cl_cb_client;
+ struct nfs4_rpc_args *args = &dp->dl_recall.cb_args;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
.rpc_cred = callback_cred
};
- int status = -ENOMEM;
+ int status;
+
+ if (clnt == NULL)
+ return; /* Client is shutting down; give up. */
- args = kzalloc(sizeof(*args), GFP_KERNEL);
- if (!args)
- goto out;
args->args_op = dp;
msg.rpc_argp = args;
dp->dl_retries = 1;
status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
&nfsd4_cb_recall_ops, dp);
-out:
- if (status) {
- kfree(args);
- put_nfs4_client(clp);
+ if (status)
nfs4_put_delegation(dp);
- }
+}
+
+void nfsd4_do_callback_rpc(struct work_struct *w)
+{
+ /* XXX: for now, just send off delegation recall. */
+ /* In future, generalize to handle any sort of callback. */
+ struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
+ struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);
+
+ _nfsd4_cb_recall(dp);
+}
+
+
+void nfsd4_cb_recall(struct nfs4_delegation *dp)
+{
+ queue_work(callback_wq, &dp->dl_recall.cb_work);
}
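
The recall path stops juggling its own refcounts and RPC-client lifetime: recalls are queued as work items on a single-threaded workqueue, and nfsd4_set_callback_client() publishes the new rpc_clnt and then flushes the queue before shutting down the old one, so after the flush no queued work can still hold the stale pointer. A sketch of that publish-then-flush idiom, with demo_* names as illustrative stand-ins (the void pointer stands in for the rpc client):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq; /* created single-threaded at init */
static void *demo_client;       /* stands in for clp->cl_cb_client */

struct demo_cb {
        struct work_struct cb_work;
};

static void demo_do_callback(struct work_struct *w)
{
        struct demo_cb *cb = container_of(w, struct demo_cb, cb_work);

        /* ... issue the async RPC through the current demo_client ... */
        (void)cb;
}

/* Called under the state lock, as nfsd4_set_callback_client() is. */
static void demo_set_client(void *new)
{
        void *old = demo_client;

        demo_client = new;              /* publish the replacement */
        flush_workqueue(demo_wq);       /* no work item can still see 'old' */
        kfree(old);                     /* real code: rpc_shutdown_client(old) */
}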
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2ab9e85..59ec449 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -969,20 +969,36 @@ static struct nfsd4_operation nfsd4_ops[];
static const char *nfsd4_op_name(unsigned opnum);
/*
- * Enforce NFSv4.1 COMPOUND ordering rules.
+ * Enforce NFSv4.1 COMPOUND ordering rules:
*
- * TODO:
- * - enforce NFS4ERR_NOT_ONLY_OP,
- * - DESTROY_SESSION MUST be the final operation in the COMPOUND request.
+ * Also note, enforced elsewhere:
+ * - SEQUENCE other than as first op results in
+ * NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
+ * - BIND_CONN_TO_SESSION must be the only op in its compound
+ * (Will be enforced in nfsd4_bind_conn_to_session().)
+ * - DESTROY_SESSION must be the final operation in a compound, if
+ * sessionid's in SEQUENCE and DESTROY_SESSION are the same.
+ * (Enforced in nfsd4_destroy_session().)
*/
-static bool nfs41_op_ordering_ok(struct nfsd4_compoundargs *args)
+static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
{
- if (args->minorversion && args->opcnt > 0) {
- struct nfsd4_op *op = &args->ops[0];
- return (op->status == nfserr_op_illegal) ||
- (nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP);
- }
- return true;
+ struct nfsd4_op *op = &args->ops[0];
+
+ /* These ordering requirements don't apply to NFSv4.0: */
+ if (args->minorversion == 0)
+ return nfs_ok;
+ /* This is weird, but OK, not our problem: */
+ if (args->opcnt == 0)
+ return nfs_ok;
+ if (op->status == nfserr_op_illegal)
+ return nfs_ok;
+ if (!(nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
+ return nfserr_op_not_in_session;
+ if (op->opnum == OP_SEQUENCE)
+ return nfs_ok;
+ if (args->opcnt != 1)
+ return nfserr_not_only_op;
+ return nfs_ok;
}
/*
@@ -1012,6 +1028,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
resp->rqstp = rqstp;
resp->cstate.minorversion = args->minorversion;
resp->cstate.replay_owner = NULL;
+ resp->cstate.session = NULL;
fh_init(&resp->cstate.current_fh, NFS4_FHSIZE);
fh_init(&resp->cstate.save_fh, NFS4_FHSIZE);
/* Use the deferral mechanism only for NFSv4.0 compounds */
@@ -1024,13 +1041,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
if (args->minorversion > nfsd_supported_minorversion)
goto out;
- if (!nfs41_op_ordering_ok(args)) {
+ status = nfs41_check_op_ordering(args);
+ if (status) {
op = &args->ops[0];
- op->status = nfserr_sequence_pos;
+ op->status = status;
goto encode_op;
}
- status = nfs_ok;
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
@@ -1295,6 +1312,11 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_SEQUENCE",
},
+ [OP_RECLAIM_COMPLETE] = {
+ .op_func = (nfsd4op_func)nfsd4_reclaim_complete,
+ .op_flags = ALLOWED_WITHOUT_FH,
+ .op_name = "OP_RECLAIM_COMPLETE",
+ },
};
static const char *nfsd4_op_name(unsigned opnum)
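
nfs41_check_op_ordering() now reports why a v4.1 compound is malformed instead of mapping every violation to NFS4ERR_SEQUENCE_POS: a first op that is not allowed first yields nfserr_op_not_in_session, and a first op that must travel alone (anything ALLOWED_AS_FIRST_OP other than SEQUENCE, e.g. EXCHANGE_ID) in a multi-op compound yields nfserr_not_only_op; illegal opcodes pass through so the normal error path reports them. The decision ladder, restated as a standalone sketch with demo_* stand-ins for the nfsd error codes:

enum demo_err {
        DEMO_OK,
        DEMO_OP_NOT_IN_SESSION, /* nfserr_op_not_in_session */
        DEMO_NOT_ONLY_OP,       /* nfserr_not_only_op */
};

static enum demo_err demo_check_first_op(unsigned minorversion,
                                         unsigned opcnt,
                                         int allowed_as_first_op,
                                         int is_sequence)
{
        if (minorversion == 0 || opcnt == 0)
                return DEMO_OK;         /* v4.0, or nothing to check */
        if (!allowed_as_first_op)
                return DEMO_OP_NOT_IN_SESSION;
        if (is_sequence)
                return DEMO_OK;         /* SEQUENCE may lead any compound */
        if (opcnt != 1)
                return DEMO_NOT_ONLY_OP; /* e.g. EXCHANGE_ID must be alone */
        return DEMO_OK;
}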
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 6a8feda..12f7109 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -45,8 +45,8 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
/* Globals */
-static time_t lease_time = 90; /* default lease time */
-static time_t user_lease_time = 90;
+time_t nfsd4_lease = 90; /* default lease time */
+time_t nfsd4_grace = 90;
static time_t boot_time;
static u32 current_ownerid = 1;
static u32 current_fileid = 1;
@@ -190,7 +190,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
dp->dl_vfs_file = stp->st_vfs_file;
dp->dl_type = type;
dp->dl_ident = cb->cb_ident;
- dp->dl_stateid.si_boot = get_seconds();
+ dp->dl_stateid.si_boot = boot_time;
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
dp->dl_stateid.si_generation = 0;
@@ -199,6 +199,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
atomic_set(&dp->dl_count, 1);
list_add(&dp->dl_perfile, &fp->fi_delegations);
list_add(&dp->dl_perclnt, &clp->cl_delegations);
+ INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
return dp;
}
@@ -249,6 +250,9 @@ unhash_delegation(struct nfs4_delegation *dp)
* SETCLIENTID state
*/
+/* client_lock protects the client lru list and session hash table */
+static DEFINE_SPINLOCK(client_lock);
+
/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS 4
#define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
@@ -367,7 +371,6 @@ static void release_openowner(struct nfs4_stateowner *sop)
nfs4_put_stateowner(sop);
}
-static DEFINE_SPINLOCK(sessionid_lock);
#define SESSION_HASH_SIZE 512
static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];
@@ -565,10 +568,10 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
new->se_flags = cses->flags;
kref_init(&new->se_ref);
- spin_lock(&sessionid_lock);
+ spin_lock(&client_lock);
list_add(&new->se_hash, &sessionid_hashtbl[idx]);
list_add(&new->se_perclnt, &clp->cl_sessions);
- spin_unlock(&sessionid_lock);
+ spin_unlock(&client_lock);
status = nfs_ok;
out:
@@ -579,7 +582,7 @@ out_free:
goto out;
}
-/* caller must hold sessionid_lock */
+/* caller must hold client_lock */
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
{
@@ -602,7 +605,7 @@ find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
return NULL;
}
-/* caller must hold sessionid_lock */
+/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
@@ -610,15 +613,6 @@ unhash_session(struct nfsd4_session *ses)
list_del(&ses->se_perclnt);
}
-static void
-release_session(struct nfsd4_session *ses)
-{
- spin_lock(&sessionid_lock);
- unhash_session(ses);
- spin_unlock(&sessionid_lock);
- nfsd4_put_session(ses);
-}
-
void
free_session(struct kref *kref)
{
@@ -634,9 +628,18 @@ free_session(struct kref *kref)
kfree(ses);
}
+/* must be called under the client_lock */
static inline void
-renew_client(struct nfs4_client *clp)
+renew_client_locked(struct nfs4_client *clp)
{
+ if (is_client_expired(clp)) {
+ dprintk("%s: client (clientid %08x/%08x) already expired\n",
+ __func__,
+ clp->cl_clientid.cl_boot,
+ clp->cl_clientid.cl_id);
+ return;
+ }
+
/*
* Move client to the end of the LRU list.
*/
@@ -647,6 +650,14 @@ renew_client(struct nfs4_client *clp)
clp->cl_time = get_seconds();
}
+static inline void
+renew_client(struct nfs4_client *clp)
+{
+ spin_lock(&client_lock);
+ renew_client_locked(clp);
+ spin_unlock(&client_lock);
+}
+
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid)
@@ -680,27 +691,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
return clp;
}
-static void
-shutdown_callback_client(struct nfs4_client *clp)
-{
- struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
-
- if (clnt) {
- /*
- * Callback threads take a reference on the client, so there
- * should be no outstanding callbacks at this point.
- */
- clp->cl_cb_conn.cb_client = NULL;
- rpc_shutdown_client(clnt);
- }
-}
-
static inline void
free_client(struct nfs4_client *clp)
{
- shutdown_callback_client(clp);
- if (clp->cl_cb_xprt)
- svc_xprt_put(clp->cl_cb_xprt);
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
kfree(clp->cl_principal);
@@ -709,10 +702,34 @@ free_client(struct nfs4_client *clp)
}
void
-put_nfs4_client(struct nfs4_client *clp)
+release_session_client(struct nfsd4_session *session)
{
- if (atomic_dec_and_test(&clp->cl_count))
+ struct nfs4_client *clp = session->se_client;
+
+ if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
+ return;
+ if (is_client_expired(clp)) {
free_client(clp);
+ session->se_client = NULL;
+ } else
+ renew_client_locked(clp);
+ spin_unlock(&client_lock);
+ nfsd4_put_session(session);
+}
+
+/* must be called under the client_lock */
+static inline void
+unhash_client_locked(struct nfs4_client *clp)
+{
+ mark_client_expired(clp);
+ list_del(&clp->cl_lru);
+ while (!list_empty(&clp->cl_sessions)) {
+ struct nfsd4_session *ses;
+ ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
+ se_perclnt);
+ unhash_session(ses);
+ nfsd4_put_session(ses);
+ }
}
static void
@@ -722,9 +739,6 @@ expire_client(struct nfs4_client *clp)
struct nfs4_delegation *dp;
struct list_head reaplist;
- dprintk("NFSD: expire_client cl_count %d\n",
- atomic_read(&clp->cl_count));
-
INIT_LIST_HEAD(&reaplist);
spin_lock(&recall_lock);
while (!list_empty(&clp->cl_delegations)) {
@@ -740,20 +754,20 @@ expire_client(struct nfs4_client *clp)
list_del_init(&dp->dl_recall_lru);
unhash_delegation(dp);
}
- list_del(&clp->cl_idhash);
- list_del(&clp->cl_strhash);
- list_del(&clp->cl_lru);
while (!list_empty(&clp->cl_openowners)) {
sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
release_openowner(sop);
}
- while (!list_empty(&clp->cl_sessions)) {
- struct nfsd4_session *ses;
- ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
- se_perclnt);
- release_session(ses);
- }
- put_nfs4_client(clp);
+ nfsd4_set_callback_client(clp, NULL);
+ if (clp->cl_cb_conn.cb_xprt)
+ svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+ list_del(&clp->cl_idhash);
+ list_del(&clp->cl_strhash);
+ spin_lock(&client_lock);
+ unhash_client_locked(clp);
+ if (atomic_read(&clp->cl_refcount) == 0)
+ free_client(clp);
+ spin_unlock(&client_lock);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
@@ -839,14 +853,15 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
}
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
- atomic_set(&clp->cl_count, 1);
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ atomic_set(&clp->cl_refcount, 0);
+ atomic_set(&clp->cl_cb_set, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
INIT_LIST_HEAD(&clp->cl_sessions);
INIT_LIST_HEAD(&clp->cl_lru);
+ clp->cl_time = get_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
copy_verf(clp, verf);
@@ -877,8 +892,7 @@ add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
idhashval = clientid_hashval(clp->cl_clientid.cl_id);
list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
- list_add_tail(&clp->cl_lru, &client_lru);
- clp->cl_time = get_seconds();
+ renew_client(clp);
}
static void
@@ -888,10 +902,9 @@ move_to_confirmed(struct nfs4_client *clp)
unsigned int strhashval;
dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
- list_del_init(&clp->cl_strhash);
list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
strhashval = clientstr_hashval(clp->cl_recdir);
- list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
+ list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
renew_client(clp);
}
@@ -1327,15 +1340,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cs_slot->sl_seqid++; /* from 0 to 1 */
move_to_confirmed(unconf);
- /*
- * We do not support RDMA or persistent sessions
- */
- cr_ses->flags &= ~SESSION4_PERSIST;
- cr_ses->flags &= ~SESSION4_RDMA;
-
if (cr_ses->flags & SESSION4_BACK_CHAN) {
- unconf->cl_cb_xprt = rqstp->rq_xprt;
- svc_xprt_get(unconf->cl_cb_xprt);
+ unconf->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
+ svc_xprt_get(rqstp->rq_xprt);
rpc_copy_addr(
(struct sockaddr *)&unconf->cl_cb_conn.cb_addr,
sa);
@@ -1344,7 +1351,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cstate->minorversion;
unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog;
unconf->cl_cb_seq_nr = 1;
- nfsd4_probe_callback(unconf);
+ nfsd4_probe_callback(unconf, &unconf->cl_cb_conn);
}
conf = unconf;
} else {
@@ -1352,6 +1359,12 @@ nfsd4_create_session(struct svc_rqst *rqstp,
goto out;
}
+ /*
+ * We do not support RDMA or persistent sessions
+ */
+ cr_ses->flags &= ~SESSION4_PERSIST;
+ cr_ses->flags &= ~SESSION4_RDMA;
+
status = alloc_init_session(rqstp, conf, cr_ses);
if (status)
goto out;
@@ -1369,6 +1382,21 @@ out:
return status;
}
+static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
+{
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+
+ return argp->opcnt == resp->opcnt;
+}
+
+static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
+{
+ if (!session)
+ return 0;
+ return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
+}
+
__be32
nfsd4_destroy_session(struct svc_rqst *r,
struct nfsd4_compound_state *cstate,
@@ -1384,19 +1412,25 @@ nfsd4_destroy_session(struct svc_rqst *r,
* - Do we need to clear any callback info from previous session?
*/
+ if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
+ if (!nfsd4_last_compound_op(r))
+ return nfserr_not_only_op;
+ }
dump_sessionid(__func__, &sessionid->sessionid);
- spin_lock(&sessionid_lock);
+ spin_lock(&client_lock);
ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
if (!ses) {
- spin_unlock(&sessionid_lock);
+ spin_unlock(&client_lock);
goto out;
}
unhash_session(ses);
- spin_unlock(&sessionid_lock);
+ spin_unlock(&client_lock);
+ nfs4_lock_state();
/* wait for callbacks */
- shutdown_callback_client(ses->se_client);
+ nfsd4_set_callback_client(ses->se_client, NULL);
+ nfs4_unlock_state();
nfsd4_put_session(ses);
status = nfs_ok;
out:
@@ -1417,7 +1451,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
if (resp->opcnt != 1)
return nfserr_sequence_pos;
- spin_lock(&sessionid_lock);
+ spin_lock(&client_lock);
status = nfserr_badsession;
session = find_in_sessionid_hashtbl(&seq->sessionid);
if (!session)
@@ -1456,23 +1490,47 @@ nfsd4_sequence(struct svc_rqst *rqstp,
cstate->slot = slot;
cstate->session = session;
- /* Hold a session reference until done processing the compound:
- * nfsd4_put_session called only if the cstate slot is set.
- */
- nfsd4_get_session(session);
out:
- spin_unlock(&sessionid_lock);
- /* Renew the clientid on success and on replay */
+ /* Hold a session reference until done processing the compound. */
if (cstate->session) {
- nfs4_lock_state();
- renew_client(session->se_client);
- nfs4_unlock_state();
+ nfsd4_get_session(cstate->session);
+ atomic_inc(&session->se_client->cl_refcount);
}
+ spin_unlock(&client_lock);
dprintk("%s: return %d\n", __func__, ntohl(status));
return status;
}
__be32
+nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
+{
+ if (rc->rca_one_fs) {
+ if (!cstate->current_fh.fh_dentry)
+ return nfserr_nofilehandle;
+ /*
+ * We don't take advantage of the rca_one_fs case.
+ * That's OK, it's optional, we can safely ignore it.
+ */
+ return nfs_ok;
+ }
+ nfs4_lock_state();
+ if (is_client_expired(cstate->session->se_client)) {
+ nfs4_unlock_state();
+ /*
+ * The following error isn't really legal.
+ * But we only get here if the client has just explicitly
+ * destroyed its own clientid. Surely it no longer cares what
+ * error it gets back on an operation for the dead
+ * client.
+ */
+ return nfserr_stale_clientid;
+ }
+ nfsd4_create_clid_dir(cstate->session->se_client);
+ nfs4_unlock_state();
+ return nfs_ok;
+}
+
+__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid *setclid)
{
@@ -1631,9 +1689,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
status = nfserr_clid_inuse;
else {
- /* XXX: We just turn off callbacks until we can handle
- * change request correctly. */
- atomic_set(&conf->cl_cb_conn.cb_set, 0);
+ atomic_set(&conf->cl_cb_set, 0);
+ nfsd4_probe_callback(conf, &unconf->cl_cb_conn);
expire_client(unconf);
status = nfs_ok;
@@ -1667,7 +1724,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
move_to_confirmed(unconf);
conf = unconf;
- nfsd4_probe_callback(conf);
+ nfsd4_probe_callback(conf, &conf->cl_cb_conn);
status = nfs_ok;
}
} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
@@ -1700,12 +1757,12 @@ alloc_init_file(struct inode *ino)
INIT_LIST_HEAD(&fp->fi_hash);
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
- spin_lock(&recall_lock);
- list_add(&fp->fi_hash, &file_hashtbl[hashval]);
- spin_unlock(&recall_lock);
fp->fi_inode = igrab(ino);
fp->fi_id = current_fileid++;
fp->fi_had_conflict = false;
+ spin_lock(&recall_lock);
+ list_add(&fp->fi_hash, &file_hashtbl[hashval]);
+ spin_unlock(&recall_lock);
return fp;
}
return NULL;
@@ -1827,7 +1884,7 @@ init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = get_seconds();
+ stp->st_stateid.si_boot = boot_time;
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -2028,7 +2085,6 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
* lock) we know the server hasn't removed the lease yet, we know
* it's safe to take a reference: */
atomic_inc(&dp->dl_count);
- atomic_inc(&dp->dl_client->cl_count);
spin_lock(&recall_lock);
list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
@@ -2347,7 +2403,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
{
struct nfs4_delegation *dp;
struct nfs4_stateowner *sop = stp->st_stateowner;
- struct nfs4_cb_conn *cb = &sop->so_client->cl_cb_conn;
+ int cb_up = atomic_read(&sop->so_client->cl_cb_set);
struct file_lock fl, *flp = &fl;
int status, flag = 0;
@@ -2355,7 +2411,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
open->op_recall = 0;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_PREVIOUS:
- if (!atomic_read(&cb->cb_set))
+ if (!cb_up)
open->op_recall = 1;
flag = open->op_delegate_type;
if (flag == NFS4_OPEN_DELEGATE_NONE)
@@ -2366,7 +2422,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
* had the chance to reclaim theirs.... */
if (locks_in_grace())
goto out;
- if (!atomic_read(&cb->cb_set) || !sop->so_confirmed)
+ if (!cb_up || !sop->so_confirmed)
goto out;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
flag = NFS4_OPEN_DELEGATE_WRITE;
@@ -2483,10 +2539,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
}
memcpy(&open->op_stateid, &stp->st_stateid, sizeof(stateid_t));
- if (nfsd4_has_session(&resp->cstate)) {
+ if (nfsd4_has_session(&resp->cstate))
open->op_stateowner->so_confirmed = 1;
- nfsd4_create_clid_dir(open->op_stateowner->so_client);
- }
/*
* Attempt to hand out a delegation. No error return, because the
@@ -2537,7 +2591,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
renew_client(clp);
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
- && !atomic_read(&clp->cl_cb_conn.cb_set))
+ && !atomic_read(&clp->cl_cb_set))
goto out;
status = nfs_ok;
out:
@@ -2554,6 +2608,12 @@ nfsd4_end_grace(void)
dprintk("NFSD: end of grace period\n");
nfsd4_recdir_purge_old();
locks_end_grace(&nfsd4_manager);
+ /*
+ * Now that every NFSv4 client has had the chance to recover and
+ * to see the (possibly new, possibly shorter) lease time, we
+ * can safely set the next grace time to the current lease time:
+ */
+ nfsd4_grace = nfsd4_lease;
}
static time_t
@@ -2563,15 +2623,17 @@ nfs4_laundromat(void)
struct nfs4_stateowner *sop;
struct nfs4_delegation *dp;
struct list_head *pos, *next, reaplist;
- time_t cutoff = get_seconds() - NFSD_LEASE_TIME;
- time_t t, clientid_val = NFSD_LEASE_TIME;
- time_t u, test_val = NFSD_LEASE_TIME;
+ time_t cutoff = get_seconds() - nfsd4_lease;
+ time_t t, clientid_val = nfsd4_lease;
+ time_t u, test_val = nfsd4_lease;
nfs4_lock_state();
dprintk("NFSD: laundromat service - starting\n");
if (locks_in_grace())
nfsd4_end_grace();
+ INIT_LIST_HEAD(&reaplist);
+ spin_lock(&client_lock);
list_for_each_safe(pos, next, &client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
@@ -2580,12 +2642,22 @@ nfs4_laundromat(void)
clientid_val = t;
break;
}
+ if (atomic_read(&clp->cl_refcount)) {
+ dprintk("NFSD: client in use (clientid %08x)\n",
+ clp->cl_clientid.cl_id);
+ continue;
+ }
+ unhash_client_locked(clp);
+ list_add(&clp->cl_lru, &reaplist);
+ }
+ spin_unlock(&client_lock);
+ list_for_each_safe(pos, next, &reaplist) {
+ clp = list_entry(pos, struct nfs4_client, cl_lru);
dprintk("NFSD: purging unused client (clientid %08x)\n",
clp->cl_clientid.cl_id);
nfsd4_remove_clid_dir(clp);
expire_client(clp);
}
- INIT_LIST_HEAD(&reaplist);
spin_lock(&recall_lock);
list_for_each_safe(pos, next, &del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
@@ -2605,7 +2677,7 @@ nfs4_laundromat(void)
list_del_init(&dp->dl_recall_lru);
unhash_delegation(dp);
}
- test_val = NFSD_LEASE_TIME;
+ test_val = nfsd4_lease;
list_for_each_safe(pos, next, &close_lru) {
sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) {
@@ -2661,39 +2733,11 @@ nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
static int
STALE_STATEID(stateid_t *stateid)
{
- if (time_after((unsigned long)boot_time,
- (unsigned long)stateid->si_boot)) {
- dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
- STATEID_VAL(stateid));
- return 1;
- }
- return 0;
-}
-
-static int
-EXPIRED_STATEID(stateid_t *stateid)
-{
- if (time_before((unsigned long)boot_time,
- ((unsigned long)stateid->si_boot)) &&
- time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
- dprintk("NFSD: expired stateid " STATEID_FMT "!\n",
- STATEID_VAL(stateid));
- return 1;
- }
- return 0;
-}
-
-static __be32
-stateid_error_map(stateid_t *stateid)
-{
- if (STALE_STATEID(stateid))
- return nfserr_stale_stateid;
- if (EXPIRED_STATEID(stateid))
- return nfserr_expired;
-
- dprintk("NFSD: bad stateid " STATEID_FMT "!\n",
+ if (stateid->si_boot == boot_time)
+ return 0;
+ dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
STATEID_VAL(stateid));
- return nfserr_bad_stateid;
+ return 1;
}
static inline int
@@ -2817,10 +2861,8 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
status = nfserr_bad_stateid;
if (is_delegation_stateid(stateid)) {
dp = find_delegation_stateid(ino, stateid);
- if (!dp) {
- status = stateid_error_map(stateid);
+ if (!dp)
goto out;
- }
status = check_stateid_generation(stateid, &dp->dl_stateid,
flags);
if (status)
@@ -2833,10 +2875,8 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
*filpp = dp->dl_vfs_file;
} else { /* open or lock stateid */
stp = find_stateid(stateid, flags);
- if (!stp) {
- status = stateid_error_map(stateid);
+ if (!stp)
goto out;
- }
if (nfs4_check_fh(current_fh, stp))
goto out;
if (!stp->st_stateowner->so_confirmed)
@@ -2908,7 +2948,7 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
*/
sop = search_close_lru(stateid->si_stateownerid, flags);
if (sop == NULL)
- return stateid_error_map(stateid);
+ return nfserr_bad_stateid;
*sopp = sop;
goto check_replay;
}
@@ -3175,10 +3215,8 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (!is_delegation_stateid(stateid))
goto out;
dp = find_delegation_stateid(inode, stateid);
- if (!dp) {
- status = stateid_error_map(stateid);
+ if (!dp)
goto out;
- }
status = check_stateid_generation(stateid, &dp->dl_stateid, flags);
if (status)
goto out;
@@ -3404,7 +3442,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = get_seconds();
+ stp->st_stateid.si_boot = boot_time;
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -3976,12 +4014,6 @@ nfsd4_load_reboot_recovery_data(void)
printk("NFSD: Failure reading reboot recovery data\n");
}
-unsigned long
-get_nfs4_grace_period(void)
-{
- return max(user_lease_time, lease_time) * HZ;
-}
-
/*
* Since the lifetime of a delegation isn't limited to that of an open, a
* client may quite reasonably hang on to a delegation as long as it has
@@ -4008,20 +4040,27 @@ set_max_delegations(void)
static int
__nfs4_state_start(void)
{
- unsigned long grace_time;
+ int ret;
boot_time = get_seconds();
- grace_time = get_nfs4_grace_period();
- lease_time = user_lease_time;
locks_start_grace(&nfsd4_manager);
printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
- grace_time/HZ);
+ nfsd4_grace);
+ ret = set_callback_cred();
+ if (ret)
+ return -ENOMEM;
laundry_wq = create_singlethread_workqueue("nfsd4");
if (laundry_wq == NULL)
return -ENOMEM;
- queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
+ ret = nfsd4_create_callback_queue();
+ if (ret)
+ goto out_free_laundry;
+ queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
set_max_delegations();
- return set_callback_cred();
+ return 0;
+out_free_laundry:
+ destroy_workqueue(laundry_wq);
+ return ret;
}
int
@@ -4039,12 +4078,6 @@ nfs4_state_start(void)
return 0;
}
-time_t
-nfs4_lease_time(void)
-{
- return lease_time;
-}
-
static void
__nfs4_state_shutdown(void)
{
@@ -4089,6 +4122,7 @@ nfs4_state_shutdown(void)
nfs4_lock_state();
nfs4_release_reclaim();
__nfs4_state_shutdown();
+ nfsd4_destroy_callback_queue();
nfs4_unlock_state();
}
@@ -4128,21 +4162,3 @@ nfs4_recoverydir(void)
{
return user_recovery_dirname;
}
-
-/*
- * Called when leasetime is changed.
- *
- * The only way the protocol gives us to handle on-the-fly lease changes is to
- * simulate a reboot. Instead of doing that, we just wait till the next time
- * we start to register any changes in lease time. If the administrator
- * really wants to change the lease time *now*, they can go ahead and bring
- * nfsd down and then back up again after changing the lease time.
- *
- * user_lease_time is protected by nfsd_mutex since it's only really accessed
- * when nfsd is starting
- */
-void
-nfs4_reset_lease(time_t leasetime)
-{
- user_lease_time = leasetime;
-}
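/*
 * Note: the laundromat hunks above switch client expiry to a two-phase
 * reap -- expired clients are unhashed under client_lock into a private
 * list, then destroyed with the lock dropped, so expire_client() may
 * block safely.  A minimal sketch of that pattern under stated
 * assumptions (struct item and destroy() are hypothetical; the list and
 * spinlock primitives are the kernel's own):
 */
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head lru;
	unsigned long expiry;		/* in jiffies */
};

static LIST_HEAD(item_lru);
static DEFINE_SPINLOCK(item_lock);

static void reap_expired(void (*destroy)(struct item *))
{
	struct item *it, *next;
	LIST_HEAD(reaplist);

	spin_lock(&item_lock);		/* phase 1: unhash under the lock */
	list_for_each_entry_safe(it, next, &item_lru, lru) {
		if (time_after_eq(jiffies, it->expiry))
			list_move(&it->lru, &reaplist);
	}
	spin_unlock(&item_lock);

	list_for_each_entry_safe(it, next, &reaplist, lru) {
		list_del(&it->lru);	/* phase 2: blocking teardown */
		destroy(it);
	}
}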
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 34ccf81..ac17a70 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1234,6 +1234,16 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
DECODE_TAIL;
}
+static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
+{
+ DECODE_HEAD;
+
+ READ_BUF(4);
+ READ32(rc->rca_one_fs);
+
+ DECODE_TAIL;
+}
+
static __be32
nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
{
@@ -1346,7 +1356,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
};
struct nfsd4_minorversion_ops {
@@ -1900,7 +1910,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
if ((buflen -= 4) < 0)
goto out_resource;
- WRITE32(NFSD_LEASE_TIME);
+ WRITE32(nfsd4_lease);
}
if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
if ((buflen -= 4) < 0)
@@ -3307,11 +3317,14 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
iov = &rqstp->rq_res.head[0];
iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base;
BUG_ON(iov->iov_len > PAGE_SIZE);
- if (nfsd4_has_session(cs) && cs->status != nfserr_replay_cache) {
- nfsd4_store_cache_entry(resp);
- dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
- resp->cstate.slot->sl_inuse = false;
- nfsd4_put_session(resp->cstate.session);
+ if (nfsd4_has_session(cs)) {
+ if (cs->status != nfserr_replay_cache) {
+ nfsd4_store_cache_entry(resp);
+ dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
+ cs->slot->sl_inuse = false;
+ }
+ /* Renew the clientid on success and on replay */
+ release_session_client(cs->session);
}
return 1;
}
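/*
 * Note: the RECLAIM_COMPLETE decoder above reads exactly one XDR word --
 * READ_BUF(4) checks that four bytes remain and READ32() pulls one
 * big-endian 32-bit value (the rca_one_fs boolean).  A hedged,
 * self-contained restatement of that step in plain C (not the nfsd
 * macros themselves):
 */
#include <stddef.h>
#include <stdint.h>

/* Returns 0 and advances *pos on success, -1 if the buffer is short. */
static int xdr_read_u32(const uint8_t *buf, size_t len, size_t *pos,
			uint32_t *out)
{
	if (len < 4 || *pos > len - 4)
		return -1;
	*out = ((uint32_t)buf[*pos] << 24) | ((uint32_t)buf[*pos + 1] << 16) |
	       ((uint32_t)buf[*pos + 2] << 8) | (uint32_t)buf[*pos + 3];
	*pos += 4;
	return 0;
}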
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index e359107..bc3194e 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -46,6 +46,7 @@ enum {
*/
#ifdef CONFIG_NFSD_V4
NFSD_Leasetime,
+ NFSD_Gracetime,
NFSD_RecoveryDir,
#endif
};
@@ -70,6 +71,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size);
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size);
#ifdef CONFIG_NFSD_V4
static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
+static ssize_t write_gracetime(struct file *file, char *buf, size_t size);
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
#endif
@@ -91,6 +93,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_MaxBlkSize] = write_maxblksize,
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = write_leasetime,
+ [NFSD_Gracetime] = write_gracetime,
[NFSD_RecoveryDir] = write_recoverydir,
#endif
};
@@ -1204,29 +1207,45 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
}
#ifdef CONFIG_NFSD_V4
-extern time_t nfs4_leasetime(void);
-
-static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
+static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
{
- /* if size > 10 seconds, call
- * nfs4_reset_lease() then write out the new lease (seconds) as reply
- */
char *mesg = buf;
- int rv, lease;
+ int rv, i;
if (size > 0) {
if (nfsd_serv)
return -EBUSY;
- rv = get_int(&mesg, &lease);
+ rv = get_int(&mesg, &i);
if (rv)
return rv;
- if (lease < 10 || lease > 3600)
+ /*
+ * Some sanity checking. We don't have a reason for
+ * these particular numbers, but problems with the
+ * extremes are:
+ * - Too short: the briefest network outage may
+ * cause clients to lose all their locks. Also,
+ * the frequent polling may be wasteful.
+ * - Too long: do you really want reboot recovery
+ * to take more than an hour? Or to make other
+ * clients wait an hour before being able to
+ * revoke a dead client's locks?
+ */
+ if (i < 10 || i > 3600)
return -EINVAL;
- nfs4_reset_lease(lease);
+ *time = i;
}
- return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n",
- nfs4_lease_time());
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n", *time);
+}
+
+static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
+{
+ ssize_t rv;
+
+ mutex_lock(&nfsd_mutex);
+ rv = __nfsd4_write_time(file, buf, size, time);
+ mutex_unlock(&nfsd_mutex);
+ return rv;
}
/**
@@ -1252,12 +1271,22 @@ static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
*/
static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
{
- ssize_t rv;
+ return nfsd4_write_time(file, buf, size, &nfsd4_lease);
+}
- mutex_lock(&nfsd_mutex);
- rv = __write_leasetime(file, buf, size);
- mutex_unlock(&nfsd_mutex);
- return rv;
+/**
+ * write_gracetime - Set or report current NFSv4 grace period time
+ *
+ * As above, but sets the time of the NFSv4 grace period.
+ *
+ * Note this should never be set to less than the *previous*
+ * lease-period time, but we don't try to enforce this.  (In the
+ * common case of a new boot, we don't know what the previous lease
+ * time was anyway.)
+ */
+static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
+{
+ return nfsd4_write_time(file, buf, size, &nfsd4_grace);
}
extern char *nfs4_recoverydir(void);
@@ -1351,6 +1380,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
+ [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
#endif
/* last one */ {""}
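/*
 * Note: nfsv4leasetime and nfsv4gracetime are transaction files -- a
 * write stores the new value (rejected with -EBUSY while nfsd is
 * running, or -EINVAL outside 10..3600), and a read on the same file
 * descriptor returns the current setting.  A hedged user-space sketch;
 * /proc/fs/nfsd is the conventional nfsd filesystem mount point and is
 * an assumption here, not part of this patch:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_nfsd4_grace(int seconds)
{
	char buf[32];
	ssize_t n;
	int fd = open("/proc/fs/nfsd/nfsv4gracetime", O_RDWR);

	if (fd < 0)
		return -1;
	n = snprintf(buf, sizeof(buf), "%d", seconds);
	if (write(fd, buf, n) < 0) {		/* fails while nfsd runs */
		close(fd);
		return -1;
	}
	n = read(fd, buf, sizeof(buf) - 1);	/* reply: "<seconds>\n" */
	if (n > 0) {
		buf[n] = '\0';
		printf("grace period is now %s", buf);
	}
	close(fd);
	return 0;
}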
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index e942a1a..7237776 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -82,7 +82,6 @@ int nfs4_state_init(void);
void nfsd4_free_slabs(void);
int nfs4_state_start(void);
void nfs4_state_shutdown(void);
-time_t nfs4_lease_time(void);
void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
#else
@@ -90,7 +89,6 @@ static inline int nfs4_state_init(void) { return 0; }
static inline void nfsd4_free_slabs(void) { }
static inline int nfs4_state_start(void) { return 0; }
static inline void nfs4_state_shutdown(void) { }
-static inline time_t nfs4_lease_time(void) { return 0; }
static inline void nfs4_reset_lease(time_t leasetime) { }
static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
#endif
@@ -229,6 +227,9 @@ extern struct timeval nfssvc_boot;
#ifdef CONFIG_NFSD_V4
+extern time_t nfsd4_lease;
+extern time_t nfsd4_grace;
+
/* before processing a COMPOUND operation, we have to check that there
* is enough space in the buffer for XDR encode to succeed. otherwise,
* we might process an operation with side effects, and be unable to
@@ -247,7 +248,6 @@ extern struct timeval nfssvc_boot;
#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
-#define NFSD_LEASE_TIME (nfs4_lease_time())
#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
/*
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 171699e..06b2a26 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -120,7 +120,7 @@ u32 nfsd_supported_minorversion;
int nfsd_vers(int vers, enum vers_op change)
{
if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
- return -1;
+ return 0;
switch(change) {
case NFSD_SET:
nfsd_versions[vers] = nfsd_version[vers];
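/*
 * Note: the nfssvc.c change matters because nfsd_vers() doubles as a
 * predicate -- NFSD_TEST callers treat any non-zero return as "version
 * enabled", so an out-of-range version must yield 0, not -1.  Hedged
 * caller-side sketch (the helper name is illustrative):
 */
static int version_enabled(int vers)
{
	/* with the old "return -1", an out-of-range vers read as enabled */
	return nfsd_vers(vers, NFSD_TEST) != 0;
}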
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index fefeae2..006c842 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -70,6 +70,16 @@ struct nfsd4_cb_sequence {
struct nfs4_client *cbs_clp;
};
+struct nfs4_rpc_args {
+ void *args_op;
+ struct nfsd4_cb_sequence args_seq;
+};
+
+struct nfsd4_callback {
+ struct nfs4_rpc_args cb_args;
+ struct work_struct cb_work;
+};
+
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -86,6 +96,7 @@ struct nfs4_delegation {
stateid_t dl_stateid;
struct knfsd_fh dl_fh;
int dl_retries;
+ struct nfsd4_callback dl_recall;
};
/* client delegation callback info */
@@ -96,9 +107,7 @@ struct nfs4_cb_conn {
u32 cb_prog;
u32 cb_minorversion;
u32 cb_ident; /* minorversion 0 only */
- /* RPC client info */
- atomic_t cb_set; /* successful CB_NULL call */
- struct rpc_clnt * cb_client;
+ struct svc_xprt *cb_xprt; /* minorversion 1 only */
};
/* Maximum number of slots per session. 160 is useful for long haul TCP */
@@ -157,7 +166,7 @@ struct nfsd4_session {
struct list_head se_hash; /* hash by sessionid */
struct list_head se_perclnt;
u32 se_flags;
- struct nfs4_client *se_client; /* for expire_client */
+ struct nfs4_client *se_client;
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
struct nfsd4_channel_attrs se_bchannel;
@@ -212,25 +221,41 @@ struct nfs4_client {
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
- struct nfs4_cb_conn cl_cb_conn; /* callback info */
- atomic_t cl_count; /* ref count */
u32 cl_firststate; /* recovery dir creation */
+ /* for v4.0 and v4.1 callbacks: */
+ struct nfs4_cb_conn cl_cb_conn;
+ struct rpc_clnt *cl_cb_client;
+ atomic_t cl_cb_set;
+
/* for nfs41 */
struct list_head cl_sessions;
struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */
u32 cl_exchange_flags;
struct nfs4_sessionid cl_sessionid;
+	/* number of RPCs in progress over an associated session: */
+ atomic_t cl_refcount;
/* for nfs41 callbacks */
/* We currently support a single back channel with a single slot */
unsigned long cl_cb_slot_busy;
u32 cl_cb_seq_nr;
- struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
};
+static inline void
+mark_client_expired(struct nfs4_client *clp)
+{
+ clp->cl_time = 0;
+}
+
+static inline bool
+is_client_expired(struct nfs4_client *clp)
+{
+ return clp->cl_time == 0;
+}
+
/* struct nfs4_client_reset
* one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl
* upon lease reset, or from upcall to state_daemon (to read in state
@@ -377,11 +402,14 @@ extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
-extern void put_nfs4_client(struct nfs4_client *clp);
extern void nfs4_free_stateowner(struct kref *kref);
extern int set_callback_cred(void);
-extern void nfsd4_probe_callback(struct nfs4_client *clp);
+extern void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+extern void nfsd4_do_callback_rpc(struct work_struct *);
extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
+extern int nfsd4_create_callback_queue(void);
+extern void nfsd4_destroy_callback_queue(void);
+extern void nfsd4_set_callback_client(struct nfs4_client *, struct rpc_clnt *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern void nfsd4_init_recdir(char *recdir_name);
@@ -392,6 +420,7 @@ extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
extern void nfsd4_recdir_purge_old(void);
extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
+extern void release_session_client(struct nfsd4_session *);
static inline void
nfs4_put_stateowner(struct nfs4_stateowner *so)
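/*
 * Note: after this change cl_time carries double duty -- the last
 * lease-renewal timestamp, and (via the zero sentinel above) the
 * "already expired, do not renew" marker that the laundromat and the
 * session code coordinate through.  A hedged sketch of the intended
 * life cycle; renew_client() exists in nfs4state.c, but the body shown
 * here is illustrative only:
 */
static inline void renew_client_sketch(struct nfs4_client *clp)
{
	if (!is_client_expired(clp))
		clp->cl_time = get_seconds();	/* lease renewed "now" */
}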
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6dd5f19..23c06f7 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -724,7 +724,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
struct inode *inode;
int flags = O_RDONLY|O_LARGEFILE;
__be32 err;
- int host_err;
+ int host_err = 0;
validate_process_creds();
@@ -761,7 +761,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
* Check to see if there are any leases on this file.
* This may block while leases are broken.
*/
- host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
+ if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
+ host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
if (host_err == -EWOULDBLOCK)
host_err = -ETIMEDOUT;
if (host_err) /* NOMEM or WOULDBLOCK */
@@ -1169,7 +1170,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}
- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
+ err = nfsd_open(rqstp, fhp, S_IFREG,
+ NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
if (err)
goto out;
if (EX_ISSYNC(fhp->fh_export)) {
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 4b1de0a..217a62c 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -20,6 +20,7 @@
#define NFSD_MAY_OWNER_OVERRIDE 64
#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
+#define NFSD_MAY_NOT_BREAK_LEASE 512
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
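/*
 * Note: the NFSD_MAY_* values are single-bit flags, so callers compose
 * them with "|" (the vfs.c hunk has nfsd_commit() pass
 * NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE) and nfsd_open() tests them
 * with "&".  A minimal sketch of the test side (helper name is
 * illustrative):
 */
static inline int may_break_lease(int access)
{
	/* COMMIT sets the bit, so its open never waits to break a lease */
	return !(access & NFSD_MAY_NOT_BREAK_LEASE);
}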
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index efa3377..4d476ff 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -381,6 +381,10 @@ struct nfsd4_destroy_session {
struct nfs4_sessionid sessionid;
};
+struct nfsd4_reclaim_complete {
+ u32 rca_one_fs;
+};
+
struct nfsd4_op {
int opnum;
__be32 status;
@@ -421,6 +425,7 @@ struct nfsd4_op {
struct nfsd4_create_session create_session;
struct nfsd4_destroy_session destroy_session;
struct nfsd4_sequence sequence;
+ struct nfsd4_reclaim_complete reclaim_complete;
} u;
struct nfs4_replay * replay;
};
@@ -513,9 +518,8 @@ extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq);
extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
-struct nfsd4_exchange_id *);
- extern __be32 nfsd4_create_session(struct svc_rqst *,
+ struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
+extern __be32 nfsd4_create_session(struct svc_rqst *,
struct nfsd4_compound_state *,
struct nfsd4_create_session *);
extern __be32 nfsd4_sequence(struct svc_rqst *,
@@ -524,6 +528,7 @@ extern __be32 nfsd4_sequence(struct svc_rqst *,
extern __be32 nfsd4_destroy_session(struct svc_rqst *,
struct nfsd4_compound_state *,
struct nfsd4_destroy_session *);
+__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_reclaim_complete *);
extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
struct nfsd4_open *open);
extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 7cfb87e..d7fd696 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -31,6 +31,11 @@
#include "alloc.h"
+/**
+ * nilfs_palloc_groups_per_desc_block - get the number of groups that a group
+ * descriptor block can maintain
+ * @inode: inode of metadata file using this allocator
+ */
static inline unsigned long
nilfs_palloc_groups_per_desc_block(const struct inode *inode)
{
@@ -38,12 +43,21 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode)
sizeof(struct nilfs_palloc_group_desc);
}
+/**
+ * nilfs_palloc_groups_count - get maximum number of groups
+ * @inode: inode of metadata file using this allocator
+ */
static inline unsigned long
nilfs_palloc_groups_count(const struct inode *inode)
{
return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */));
}
+/**
+ * nilfs_palloc_init_blockgroup - initialize private variables for allocator
+ * @inode: inode of metadata file using this allocator
+ * @entry_size: size of the persistent object
+ */
int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
@@ -69,6 +83,12 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
return 0;
}
+/**
+ * nilfs_palloc_group - get group number and offset from an entry number
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ * @offset: pointer to store offset number in the group
+ */
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
unsigned long *offset)
{
@@ -78,6 +98,14 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
return group;
}
+/**
+ * nilfs_palloc_desc_blkoff - get block offset of a group descriptor block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ *
+ * nilfs_palloc_desc_blkoff() returns block offset of the descriptor
+ * block which contains a descriptor of the specified group.
+ */
static unsigned long
nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
{
@@ -86,6 +114,14 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block;
}
+/**
+ * nilfs_palloc_bitmap_blkoff - get block offset of a bitmap block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ *
+ * nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap
+ * block used to allocate/deallocate entries in the specified group.
+ */
static unsigned long
nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
{
@@ -95,6 +131,12 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
desc_offset * NILFS_MDT(inode)->mi_blocks_per_group;
}
+/**
+ * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @desc: pointer to descriptor structure for the group
+ */
static unsigned long
nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
const struct nilfs_palloc_group_desc *desc)
@@ -107,6 +149,13 @@ nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
return nfree;
}
+/**
+ * nilfs_palloc_group_desc_add_entries - adjust count of free entries
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @desc: pointer to descriptor structure for the group
+ * @n: delta to be added
+ */
static void
nilfs_palloc_group_desc_add_entries(struct inode *inode,
unsigned long group,
@@ -118,6 +167,11 @@ nilfs_palloc_group_desc_add_entries(struct inode *inode,
spin_unlock(nilfs_mdt_bgl_lock(inode, group));
}
+/**
+ * nilfs_palloc_entry_blkoff - get block offset of an entry block
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ */
static unsigned long
nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
{
@@ -129,6 +183,12 @@ nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
group_offset / NILFS_MDT(inode)->mi_entries_per_block;
}
+/**
+ * nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block
+ * @inode: inode of metadata file
+ * @bh: buffer head of the buffer to be initialized
+ * @kaddr: kernel address mapped for the page including the buffer
+ */
static void nilfs_palloc_desc_block_init(struct inode *inode,
struct buffer_head *bh, void *kaddr)
{
@@ -179,6 +239,13 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
return ret;
}
+/**
+ * nilfs_palloc_get_desc_block - get buffer head of a group descriptor block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @create: create flag
+ * @bhp: pointer to store the resultant buffer head
+ */
static int nilfs_palloc_get_desc_block(struct inode *inode,
unsigned long group,
int create, struct buffer_head **bhp)
@@ -191,6 +258,13 @@ static int nilfs_palloc_get_desc_block(struct inode *inode,
bhp, &cache->prev_desc, &cache->lock);
}
+/**
+ * nilfs_palloc_get_bitmap_block - get buffer head of a bitmap block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @create: create flag
+ * @bhp: pointer to store the resultant buffer head
+ */
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
unsigned long group,
int create, struct buffer_head **bhp)
@@ -203,6 +277,13 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode,
&cache->prev_bitmap, &cache->lock);
}
+/**
+ * nilfs_palloc_get_entry_block - get buffer head of an entry block
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ * @create: create flag
+ * @bhp: pointer to store the resultant buffer head
+ */
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
int create, struct buffer_head **bhp)
{
@@ -214,6 +295,13 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
&cache->prev_entry, &cache->lock);
}
+/**
+ * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @bh: buffer head of the buffer storing the group descriptor block
+ * @kaddr: kernel address mapped for the page including the buffer
+ */
static struct nilfs_palloc_group_desc *
nilfs_palloc_block_get_group_desc(const struct inode *inode,
unsigned long group,
@@ -223,6 +311,13 @@ nilfs_palloc_block_get_group_desc(const struct inode *inode,
group % nilfs_palloc_groups_per_desc_block(inode);
}
+/**
+ * nilfs_palloc_block_get_entry - get kernel address of an entry
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ * @bh: buffer head of the buffer storing the entry block
+ * @kaddr: kernel address mapped for the page including the buffer
+ */
void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
const struct buffer_head *bh, void *kaddr)
{
@@ -235,11 +330,19 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
entry_offset * NILFS_MDT(inode)->mi_entry_size;
}
+/**
+ * nilfs_palloc_find_available_slot - find available slot in a group
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @target: offset number of an entry in the group (start point)
+ * @bitmap: bitmap of the group
+ * @bsize: size in bits
+ */
static int nilfs_palloc_find_available_slot(struct inode *inode,
unsigned long group,
unsigned long target,
unsigned char *bitmap,
- int bsize) /* size in bits */
+ int bsize)
{
int curr, pos, end, i;
@@ -277,6 +380,13 @@ static int nilfs_palloc_find_available_slot(struct inode *inode,
return -ENOSPC;
}
+/**
+ * nilfs_palloc_rest_groups_in_desc_block - get the remaining number of groups
+ * in a group descriptor block
+ * @inode: inode of metadata file using this allocator
+ * @curr: current group number
+ * @max: maximum number of groups
+ */
static unsigned long
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
unsigned long curr, unsigned long max)
@@ -287,6 +397,11 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
max - curr + 1);
}
+/**
+ * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the allocation
+ */
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -366,6 +481,11 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
return ret;
}
+/**
+ * nilfs_palloc_commit_alloc_entry - finish allocation of a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the allocation
+ */
void nilfs_palloc_commit_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -377,6 +497,11 @@ void nilfs_palloc_commit_alloc_entry(struct inode *inode,
brelse(req->pr_desc_bh);
}
+/**
+ * nilfs_palloc_commit_free_entry - finish deallocating a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the removal
+ */
void nilfs_palloc_commit_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -410,6 +535,11 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
brelse(req->pr_desc_bh);
}
+/**
+ * nilfs_palloc_abort_alloc_entry - cancel allocation of a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the allocation
+ */
void nilfs_palloc_abort_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -442,6 +572,11 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
req->pr_desc_bh = NULL;
}
+/**
+ * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the removal
+ */
int nilfs_palloc_prepare_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -464,6 +599,11 @@ int nilfs_palloc_prepare_free_entry(struct inode *inode,
return 0;
}
+/**
+ * nilfs_palloc_abort_free_entry - cancel deallocating a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the removal
+ */
void nilfs_palloc_abort_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -475,6 +615,12 @@ void nilfs_palloc_abort_free_entry(struct inode *inode,
req->pr_desc_bh = NULL;
}
+/**
+ * nilfs_palloc_group_is_in - check whether an entry belongs to a group
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @nr: serial number of the entry (e.g. inode number)
+ */
static int
nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
{
@@ -485,6 +631,12 @@ nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
return (nr >= first) && (nr <= last);
}
+/**
+ * nilfs_palloc_freev - deallocate a set of persistent objects
+ * @inode: inode of metadata file using this allocator
+ * @entry_nrs: array of entry numbers to be deallocated
+ * @nitems: number of entries stored in @entry_nrs
+ */
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
struct buffer_head *desc_bh, *bitmap_bh;
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 5cccf87..9af34a7 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -29,6 +29,13 @@
#include <linux/buffer_head.h>
#include <linux/fs.h>
+/**
+ * nilfs_palloc_entries_per_group - get the number of entries per group
+ * @inode: inode of metadata file using this allocator
+ *
+ * The number of entries per group is defined by the number of bits
+ * that a bitmap block can maintain.
+ */
static inline unsigned long
nilfs_palloc_entries_per_group(const struct inode *inode)
{
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 76c38e3..b27a342 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -31,63 +31,16 @@
#include "alloc.h"
#include "dat.h"
-/**
- * struct nilfs_btree_path - A path on which B-tree operations are executed
- * @bp_bh: buffer head of node block
- * @bp_sib_bh: buffer head of sibling node block
- * @bp_index: index of child node
- * @bp_oldreq: ptr end request for old ptr
- * @bp_newreq: ptr alloc request for new ptr
- * @bp_op: rebalance operation
- */
-struct nilfs_btree_path {
- struct buffer_head *bp_bh;
- struct buffer_head *bp_sib_bh;
- int bp_index;
- union nilfs_bmap_ptr_req bp_oldreq;
- union nilfs_bmap_ptr_req bp_newreq;
- struct nilfs_btnode_chkey_ctxt bp_ctxt;
- void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *,
- int, __u64 *, __u64 *);
-};
-
-/*
- * B-tree path operations
- */
-
-static struct kmem_cache *nilfs_btree_path_cache;
-
-int __init nilfs_btree_path_cache_init(void)
-{
- nilfs_btree_path_cache =
- kmem_cache_create("nilfs2_btree_path_cache",
- sizeof(struct nilfs_btree_path) *
- NILFS_BTREE_LEVEL_MAX, 0, 0, NULL);
- return (nilfs_btree_path_cache != NULL) ? 0 : -ENOMEM;
-}
-
-void nilfs_btree_path_cache_destroy(void)
-{
- kmem_cache_destroy(nilfs_btree_path_cache);
-}
-
-static inline struct nilfs_btree_path *nilfs_btree_alloc_path(void)
-{
- return kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
-}
-
-static inline void nilfs_btree_free_path(struct nilfs_btree_path *path)
+static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
- kmem_cache_free(nilfs_btree_path_cache, path);
-}
+ struct nilfs_btree_path *path;
+ int level = NILFS_BTREE_LEVEL_DATA;
-static void nilfs_btree_init_path(struct nilfs_btree_path *path)
-{
- int level;
+ path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
+ if (path == NULL)
+ goto out;
- for (level = NILFS_BTREE_LEVEL_DATA;
- level < NILFS_BTREE_LEVEL_MAX;
- level++) {
+ for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
path[level].bp_bh = NULL;
path[level].bp_sib_bh = NULL;
path[level].bp_index = 0;
@@ -95,15 +48,19 @@ static void nilfs_btree_init_path(struct nilfs_btree_path *path)
path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
path[level].bp_op = NULL;
}
+
+out:
+ return path;
}
-static void nilfs_btree_release_path(struct nilfs_btree_path *path)
+static void nilfs_btree_free_path(struct nilfs_btree_path *path)
{
- int level;
+ int level = NILFS_BTREE_LEVEL_DATA;
- for (level = NILFS_BTREE_LEVEL_DATA; level < NILFS_BTREE_LEVEL_MAX;
- level++)
+ for (; level < NILFS_BTREE_LEVEL_MAX; level++)
brelse(path[level].bp_bh);
+
+ kmem_cache_free(nilfs_btree_path_cache, path);
}
/*
@@ -566,14 +523,12 @@ static int nilfs_btree_lookup(const struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
if (ptrp != NULL)
*ptrp = ptr;
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -594,7 +549,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
+
ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
if (ret < 0)
goto out;
@@ -655,7 +610,6 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
*ptrp = ptr;
ret = cnt;
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
@@ -1123,7 +1077,6 @@ static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup(btree, path, key, NULL,
NILFS_BTREE_LEVEL_NODE_MIN);
@@ -1140,7 +1093,6 @@ static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
@@ -1456,7 +1408,7 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
+
ret = nilfs_btree_do_lookup(btree, path, key, NULL,
NILFS_BTREE_LEVEL_NODE_MIN);
if (ret < 0)
@@ -1473,7 +1425,6 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
@@ -1488,11 +1439,9 @@ static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -1923,7 +1872,6 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
if (buffer_nilfs_node(bh)) {
node = (struct nilfs_btree_node *)bh->b_data;
@@ -1947,7 +1895,6 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
nilfs_btree_propagate_p(btree, path, level, bh);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -2108,7 +2055,6 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
if (buffer_nilfs_node(*bh)) {
node = (struct nilfs_btree_node *)(*bh)->b_data;
@@ -2130,7 +2076,6 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -2175,7 +2120,6 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1);
if (ret < 0) {
@@ -2195,7 +2139,6 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
nilfs_bmap_set_dirty(&btree->bt_bmap);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
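/*
 * Note: the btree.c hunks fold nilfs_btree_init_path() into
 * nilfs_btree_alloc_path() and nilfs_btree_release_path() into
 * nilfs_btree_free_path(), so every caller shrinks to a single
 * alloc/use/free pair.  Hedged caller-side sketch (the lookup body is
 * elided and the function name is illustrative):
 */
static int lookup_sketch(struct nilfs_btree *btree, __u64 key)
{
	struct nilfs_btree_path *path = nilfs_btree_alloc_path();
	int ret;

	if (path == NULL)
		return -ENOMEM;
	ret = 0;			/* ... walk the tree via path ... */
	nilfs_btree_free_path(path);	/* brelse()s bp_bh refs, then frees */
	return ret;
}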
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 4b82d84..af638d5 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -30,9 +30,6 @@
#include "btnode.h"
#include "bmap.h"
-struct nilfs_btree;
-struct nilfs_btree_path;
-
/**
* struct nilfs_btree - B-tree structure
* @bt_bmap: bmap base structure
@@ -41,6 +38,25 @@ struct nilfs_btree {
struct nilfs_bmap bt_bmap;
};
+/**
+ * struct nilfs_btree_path - A path on which B-tree operations are executed
+ * @bp_bh: buffer head of node block
+ * @bp_sib_bh: buffer head of sibling node block
+ * @bp_index: index of child node
+ * @bp_oldreq: ptr end request for old ptr
+ * @bp_newreq: ptr alloc request for new ptr
+ * @bp_op: rebalance operation
+ */
+struct nilfs_btree_path {
+ struct buffer_head *bp_bh;
+ struct buffer_head *bp_sib_bh;
+ int bp_index;
+ union nilfs_bmap_ptr_req bp_oldreq;
+ union nilfs_bmap_ptr_req bp_newreq;
+ struct nilfs_btnode_chkey_ctxt bp_ctxt;
+ void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *,
+ int, __u64 *, __u64 *);
+};
#define NILFS_BTREE_ROOT_SIZE NILFS_BMAP_SIZE
#define NILFS_BTREE_ROOT_NCHILDREN_MAX \
@@ -57,6 +73,7 @@ struct nilfs_btree {
#define NILFS_BTREE_KEY_MIN ((__u64)0)
#define NILFS_BTREE_KEY_MAX (~(__u64)0)
+extern struct kmem_cache *nilfs_btree_path_cache;
int nilfs_btree_path_cache_init(void);
void nilfs_btree_path_cache_destroy(void);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 0957b58..5e226d4 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -451,7 +451,7 @@ static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
inode->i_op = &nilfs_special_inode_operations;
init_special_inode(
inode, inode->i_mode,
- new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
+ huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
}
nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
brelse(bh);
@@ -511,7 +511,7 @@ void nilfs_write_inode_common(struct inode *inode,
nilfs_bmap_write(ii->i_bmap, raw_inode);
else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_device_code =
- cpu_to_le64(new_encode_dev(inode->i_rdev));
+ cpu_to_le64(huge_encode_dev(inode->i_rdev));
/* When extending inode, nilfs->ns_inode_size should be checked
for substitutions of appended fields */
}
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index ba43146..bae2a51 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -105,6 +105,8 @@ static void store_segsum_info(struct nilfs_segsum_info *ssi,
ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);
+
+	/* need to verify the ->ss_bytes field before reading ->ss_cno */
}
/**
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 17851f7..2e6a272 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -40,35 +40,10 @@ struct nilfs_write_info {
sector_t blocknr;
};
-
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
struct the_nilfs *nilfs);
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
-
-static struct kmem_cache *nilfs_segbuf_cachep;
-
-static void nilfs_segbuf_init_once(void *obj)
-{
- memset(obj, 0, sizeof(struct nilfs_segment_buffer));
-}
-
-int __init nilfs_init_segbuf_cache(void)
-{
- nilfs_segbuf_cachep =
- kmem_cache_create("nilfs2_segbuf_cache",
- sizeof(struct nilfs_segment_buffer),
- 0, SLAB_RECLAIM_ACCOUNT,
- nilfs_segbuf_init_once);
-
- return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
-}
-
-void nilfs_destroy_segbuf_cache(void)
-{
- kmem_cache_destroy(nilfs_segbuf_cachep);
-}
-
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
struct nilfs_segment_buffer *segbuf;
@@ -81,6 +56,7 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
INIT_LIST_HEAD(&segbuf->sb_list);
INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
+ segbuf->sb_super_root = NULL;
init_completion(&segbuf->sb_bio_event);
atomic_set(&segbuf->sb_err, 0);
@@ -158,7 +134,7 @@ int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
}
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
- time_t ctime)
+ time_t ctime, __u64 cno)
{
int err;
@@ -171,6 +147,7 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
segbuf->sb_sum.ctime = ctime;
+ segbuf->sb_sum.cno = cno;
return 0;
}
@@ -196,13 +173,14 @@ void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
raw_sum->ss_nfinfo = cpu_to_le32(segbuf->sb_sum.nfinfo);
raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
raw_sum->ss_pad = 0;
+ raw_sum->ss_cno = cpu_to_le64(segbuf->sb_sum.cno);
}
/*
* CRC calculation routines
*/
-void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
- u32 seed)
+static void
+nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed)
{
struct buffer_head *bh;
struct nilfs_segment_summary *raw_sum;
@@ -229,8 +207,8 @@ void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
raw_sum->ss_sumsum = cpu_to_le32(crc);
}
-void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
- u32 seed)
+static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
+ u32 seed)
{
struct buffer_head *bh;
struct nilfs_segment_summary *raw_sum;
@@ -256,6 +234,20 @@ void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
raw_sum->ss_datasum = cpu_to_le32(crc);
}
+static void
+nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
+ u32 seed)
+{
+ struct nilfs_super_root *raw_sr;
+ u32 crc;
+
+ raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
+ crc = crc32_le(seed,
+ (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
+ NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
+ raw_sr->sr_sum = cpu_to_le32(crc);
+}
+
static void nilfs_release_buffers(struct list_head *list)
{
struct buffer_head *bh, *n;
@@ -282,6 +274,7 @@ static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
nilfs_release_buffers(&segbuf->sb_segsum_buffers);
nilfs_release_buffers(&segbuf->sb_payload_buffers);
+ segbuf->sb_super_root = NULL;
}
/*
@@ -334,6 +327,23 @@ int nilfs_wait_on_logs(struct list_head *logs)
return ret;
}
+/**
+ * nilfs_add_checksums_on_logs - add checksums on the logs
+ * @logs: list of segment buffers storing target logs
+ * @seed: checksum seed value
+ */
+void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
+{
+ struct nilfs_segment_buffer *segbuf;
+
+ list_for_each_entry(segbuf, logs, sb_list) {
+ if (segbuf->sb_super_root)
+ nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
+ nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
+ nilfs_segbuf_fill_in_data_crc(segbuf, seed);
+ }
+}
+
/*
* BIO operations
*/
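/*
 * Note: the CRC helpers above share one shape -- checksum the record
 * starting just past its own sum field, then store the result in that
 * field, which is exactly the range handed to crc32_le() for the super
 * root.  Hedged kernel-style sketch against crc32_le() from
 * <linux/crc32.h>; struct record is hypothetical:
 */
#include <linux/crc32.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct record {
	__le32 sum;		/* like sr_sum: first, excluded from the CRC */
	u8 payload[60];
};

static void record_fill_in_crc(struct record *r, u32 seed)
{
	u32 crc = crc32_le(seed, (unsigned char *)r + sizeof(r->sum),
			   sizeof(*r) - sizeof(r->sum));

	r->sum = cpu_to_le32(crc);
}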
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 94dfd35..fdf1c3b 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -37,6 +37,7 @@
* @sumbytes: Byte count of segment summary
* @nfileblk: Total number of file blocks
* @seg_seq: Segment sequence number
+ * @cno: Checkpoint number
* @ctime: Creation time
* @next: Block number of the next full segment
*/
@@ -48,6 +49,7 @@ struct nilfs_segsum_info {
unsigned long sumbytes;
unsigned long nfileblk;
u64 seg_seq;
+ __u64 cno;
time_t ctime;
sector_t next;
};
@@ -76,6 +78,7 @@ struct nilfs_segsum_info {
* @sb_rest_blocks: Number of residual blocks in the current segment
* @sb_segsum_buffers: List of buffers for segment summaries
* @sb_payload_buffers: List of buffers for segment payload
+ * @sb_super_root: Pointer to the buffer storing a super root block (if present)
* @sb_nbio: Number of flying bio requests
* @sb_err: I/O error status
* @sb_bio_event: Completion event of log writing
@@ -95,6 +98,7 @@ struct nilfs_segment_buffer {
/* Buffers */
struct list_head sb_segsum_buffers;
struct list_head sb_payload_buffers; /* including super root */
+ struct buffer_head *sb_super_root;
/* io status */
int sb_nbio;
@@ -121,6 +125,7 @@ struct nilfs_segment_buffer {
b_assoc_buffers))
#define NILFS_SEGBUF_BH_IS_LAST(bh, head) ((bh)->b_assoc_buffers.next == head)
+extern struct kmem_cache *nilfs_segbuf_cachep;
int __init nilfs_init_segbuf_cache(void);
void nilfs_destroy_segbuf_cache(void);
@@ -132,13 +137,11 @@ void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
struct nilfs_segment_buffer *prev);
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
struct the_nilfs *);
-int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t);
+int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t, __u64);
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *);
int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *,
struct buffer_head **);
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *);
-void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *, u32);
-void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *, u32);
static inline void
nilfs_segbuf_add_segsum_buffer(struct nilfs_segment_buffer *segbuf,
@@ -171,6 +174,7 @@ void nilfs_truncate_logs(struct list_head *logs,
struct nilfs_segment_buffer *last);
int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs);
int nilfs_wait_on_logs(struct list_head *logs);
+void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed);
static inline void nilfs_destroy_logs(struct list_head *logs)
{
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 6a7dbd8..c920164 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -116,42 +116,6 @@ static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
#define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
-/*
- * Transaction
- */
-static struct kmem_cache *nilfs_transaction_cachep;
-
-/**
- * nilfs_init_transaction_cache - create a cache for nilfs_transaction_info
- *
- * nilfs_init_transaction_cache() creates a slab cache for the struct
- * nilfs_transaction_info.
- *
- * Return Value: On success, it returns 0. On error, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- */
-int nilfs_init_transaction_cache(void)
-{
- nilfs_transaction_cachep =
- kmem_cache_create("nilfs2_transaction_cache",
- sizeof(struct nilfs_transaction_info),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
- return (nilfs_transaction_cachep == NULL) ? -ENOMEM : 0;
-}
-
-/**
- * nilfs_destroy_transaction_cache - destroy the cache for transaction info
- *
- * nilfs_destroy_transaction_cache() frees the slab cache for the struct
- * nilfs_transaction_info.
- */
-void nilfs_destroy_transaction_cache(void)
-{
- kmem_cache_destroy(nilfs_transaction_cachep);
-}
-
static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
struct nilfs_transaction_info *cur_ti = current->journal_info;
@@ -402,7 +366,8 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
if (nilfs_doing_gc())
flags = NILFS_SS_GC;
- err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime);
+ err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime,
+ sci->sc_sbi->s_nilfs->ns_cno);
if (unlikely(err))
return err;
@@ -435,7 +400,7 @@ static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
return err;
segbuf = sci->sc_curseg;
}
- err = nilfs_segbuf_extend_payload(segbuf, &sci->sc_super_root);
+ err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
if (likely(!err))
segbuf->sb_sum.flags |= NILFS_SS_SR;
return err;
@@ -599,7 +564,7 @@ static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
*vblocknr = binfo->bi_v.bi_vblocknr;
}
-struct nilfs_sc_operations nilfs_sc_file_ops = {
+static struct nilfs_sc_operations nilfs_sc_file_ops = {
.collect_data = nilfs_collect_file_data,
.collect_node = nilfs_collect_file_node,
.collect_bmap = nilfs_collect_file_bmap,
@@ -649,7 +614,7 @@ static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
*binfo_dat = binfo->bi_dat;
}
-struct nilfs_sc_operations nilfs_sc_dat_ops = {
+static struct nilfs_sc_operations nilfs_sc_dat_ops = {
.collect_data = nilfs_collect_dat_data,
.collect_node = nilfs_collect_file_node,
.collect_bmap = nilfs_collect_dat_bmap,
@@ -657,7 +622,7 @@ struct nilfs_sc_operations nilfs_sc_dat_ops = {
.write_node_binfo = nilfs_write_dat_node_binfo,
};
-struct nilfs_sc_operations nilfs_sc_dsync_ops = {
+static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
.collect_data = nilfs_collect_file_data,
.collect_node = NULL,
.collect_bmap = NULL,
@@ -932,43 +897,16 @@ static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
}
}
-/*
- * CRC calculation routines
- */
-static void nilfs_fill_in_super_root_crc(struct buffer_head *bh_sr, u32 seed)
-{
- struct nilfs_super_root *raw_sr =
- (struct nilfs_super_root *)bh_sr->b_data;
- u32 crc;
-
- crc = crc32_le(seed,
- (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
- NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
- raw_sr->sr_sum = cpu_to_le32(crc);
-}
-
-static void nilfs_segctor_fill_in_checksums(struct nilfs_sc_info *sci,
- u32 seed)
-{
- struct nilfs_segment_buffer *segbuf;
-
- if (sci->sc_super_root)
- nilfs_fill_in_super_root_crc(sci->sc_super_root, seed);
-
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
- nilfs_segbuf_fill_in_data_crc(segbuf, seed);
- }
-}
-
static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct buffer_head *bh_sr = sci->sc_super_root;
- struct nilfs_super_root *raw_sr =
- (struct nilfs_super_root *)bh_sr->b_data;
+ struct buffer_head *bh_sr;
+ struct nilfs_super_root *raw_sr;
unsigned isz = nilfs->ns_inode_size;
+ bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
+ raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
+
raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
raw_sr->sr_nongc_ctime
= cpu_to_le64(nilfs_doing_gc() ?
@@ -1491,7 +1429,6 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
/* Collection retry loop */
for (;;) {
- sci->sc_super_root = NULL;
sci->sc_nblk_this_inc = 0;
sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
@@ -1568,7 +1505,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
ssp.offset = sizeof(struct nilfs_segment_summary);
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
- if (bh == sci->sc_super_root)
+ if (bh == segbuf->sb_super_root)
break;
if (!finfo) {
finfo = nilfs_segctor_map_segsum_entry(
@@ -1729,7 +1666,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- if (bh == sci->sc_super_root) {
+ if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
lock_page(bd_page);
clear_page_dirty_for_io(bd_page);
@@ -1848,7 +1785,7 @@ static void nilfs_clear_copied_buffers(struct list_head *list, int err)
}
static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
- struct buffer_head *bh_sr, int err)
+ int err)
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
@@ -1869,7 +1806,7 @@ static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- if (bh == bh_sr) {
+ if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
@@ -1898,7 +1835,7 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
list_splice_tail_init(&sci->sc_write_logs, &logs);
ret = nilfs_wait_on_logs(&logs);
- nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err);
+ nilfs_abort_logs(&logs, NULL, ret ? : err);
list_splice_tail_init(&sci->sc_segbufs, &logs);
nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
@@ -1914,7 +1851,6 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
}
nilfs_destroy_logs(&logs);
- sci->sc_super_root = NULL;
}
static void nilfs_set_next_segment(struct the_nilfs *nilfs,
@@ -1933,7 +1869,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
- int update_sr = (sci->sc_super_root != NULL);
+ int update_sr = false;
list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
struct buffer_head *bh;
@@ -1964,11 +1900,12 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
clear_buffer_nilfs_volatile(bh);
- if (bh == sci->sc_super_root) {
+ if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
}
+ update_sr = true;
break;
}
if (bh->b_page != fs_page) {
@@ -2115,7 +2052,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct the_nilfs *nilfs = sbi->s_nilfs;
struct page *failed_page;
- int err, has_sr = 0;
+ int err;
sci->sc_stage.scnt = NILFS_ST_INIT;
@@ -2143,8 +2080,6 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
if (unlikely(err))
goto failed;
- has_sr = (sci->sc_super_root != NULL);
-
/* Avoid empty segment */
if (sci->sc_stage.scnt == NILFS_ST_DONE &&
NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
@@ -2159,7 +2094,8 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);
- if (has_sr) {
+ if (mode == SC_LSEG_SR &&
+ sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
err = nilfs_segctor_fill_in_checkpoint(sci);
if (unlikely(err))
goto failed_to_write;
@@ -2171,11 +2107,12 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
/* Write partial segments */
err = nilfs_segctor_prepare_write(sci, &failed_page);
if (err) {
- nilfs_abort_logs(&sci->sc_segbufs, failed_page,
- sci->sc_super_root, err);
+ nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
goto failed_to_write;
}
- nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed);
+
+ nilfs_add_checksums_on_logs(&sci->sc_segbufs,
+ nilfs->ns_crc_seed);
err = nilfs_segctor_write(sci, nilfs);
if (unlikely(err))
@@ -2196,8 +2133,6 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
}
} while (sci->sc_stage.scnt != NILFS_ST_DONE);
- sci->sc_super_root = NULL;
-
out:
nilfs_segctor_check_out_files(sci, sbi);
return err;
@@ -2224,9 +2159,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
spin_lock(&sci->sc_state_lock);
- if (sci->sc_timer && !(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
- sci->sc_timer->expires = jiffies + sci->sc_interval;
- add_timer(sci->sc_timer);
+ if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
+ sci->sc_timer.expires = jiffies + sci->sc_interval;
+ add_timer(&sci->sc_timer);
sci->sc_state |= NILFS_SEGCTOR_COMMIT;
}
spin_unlock(&sci->sc_state_lock);
@@ -2431,9 +2366,7 @@ static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
spin_lock(&sci->sc_state_lock);
sci->sc_seq_accepted = sci->sc_seq_request;
spin_unlock(&sci->sc_state_lock);
-
- if (sci->sc_timer)
- del_timer_sync(sci->sc_timer);
+ del_timer_sync(&sci->sc_timer);
}
/**
@@ -2459,9 +2392,9 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
- if (sci->sc_timer && (sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
- time_before(jiffies, sci->sc_timer->expires))
- add_timer(sci->sc_timer);
+ if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
+ time_before(jiffies, sci->sc_timer.expires))
+ add_timer(&sci->sc_timer);
}
spin_unlock(&sci->sc_state_lock);
}
@@ -2640,13 +2573,10 @@ static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
- struct timer_list timer;
int timeout = 0;
- init_timer(&timer);
- timer.data = (unsigned long)current;
- timer.function = nilfs_construction_timeout;
- sci->sc_timer = &timer;
+ sci->sc_timer.data = (unsigned long)current;
+ sci->sc_timer.function = nilfs_construction_timeout;
/* start sync. */
sci->sc_task = current;
@@ -2695,7 +2625,7 @@ static int nilfs_segctor_thread(void *arg)
should_sleep = 0;
else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
should_sleep = time_before(jiffies,
- sci->sc_timer->expires);
+ sci->sc_timer.expires);
if (should_sleep) {
spin_unlock(&sci->sc_state_lock);
@@ -2704,7 +2634,7 @@ static int nilfs_segctor_thread(void *arg)
}
finish_wait(&sci->sc_wait_daemon, &wait);
timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
- time_after_eq(jiffies, sci->sc_timer->expires));
+ time_after_eq(jiffies, sci->sc_timer.expires));
if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
@@ -2713,8 +2643,6 @@ static int nilfs_segctor_thread(void *arg)
end_thread:
spin_unlock(&sci->sc_state_lock);
- del_timer_sync(sci->sc_timer);
- sci->sc_timer = NULL;
/* end sync. */
sci->sc_task = NULL;
@@ -2750,13 +2678,6 @@ static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
}
}
-static int nilfs_segctor_init(struct nilfs_sc_info *sci)
-{
- sci->sc_seq_done = sci->sc_seq_request;
-
- return nilfs_segctor_start_thread(sci);
-}
-
/*
* Setup & clean-up functions
*/
@@ -2780,6 +2701,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
INIT_LIST_HEAD(&sci->sc_write_logs);
INIT_LIST_HEAD(&sci->sc_gc_inodes);
INIT_LIST_HEAD(&sci->sc_copied_buffers);
+ init_timer(&sci->sc_timer);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
@@ -2846,6 +2768,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
down_write(&sbi->s_nilfs->ns_segctor_sem);
+ del_timer_sync(&sci->sc_timer);
kfree(sci);
}
@@ -2880,7 +2803,7 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
return -ENOMEM;
nilfs_attach_writer(nilfs, sbi);
- err = nilfs_segctor_init(NILFS_SC(sbi));
+ err = nilfs_segctor_start_thread(NILFS_SC(sbi));
if (err) {
nilfs_detach_writer(nilfs, sbi);
kfree(sbi->s_sc_info);
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 82dfd6a..dca1423 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -100,7 +100,6 @@ struct nilfs_segsum_pointer {
* @sc_write_logs: List of segment buffers to hold logs under writing
* @sc_segbuf_nblocks: Number of available blocks in segment buffers.
* @sc_curseg: Current segment buffer
- * @sc_super_root: Pointer to the super root buffer
* @sc_stage: Collection stage
* @sc_finfo_ptr: pointer to the current finfo struct in the segment summary
* @sc_binfo_ptr: pointer to the current binfo struct in the segment summary
@@ -148,7 +147,6 @@ struct nilfs_sc_info {
struct list_head sc_write_logs;
unsigned long sc_segbuf_nblocks;
struct nilfs_segment_buffer *sc_curseg;
- struct buffer_head *sc_super_root;
struct nilfs_cstage sc_stage;
@@ -179,7 +177,7 @@ struct nilfs_sc_info {
unsigned long sc_lseg_stime; /* in 1/HZ seconds */
unsigned long sc_watermark;
- struct timer_list *sc_timer;
+ struct timer_list sc_timer;
struct task_struct *sc_task;
};
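For reference, the lifecycle of an embedded timer under this kernel's timer API, which the conversion above follows -- a hedged sketch with invented names, not part of the patch:

struct my_ctx {
	struct timer_list timer;	/* embedded: no allocation, no NULL state */
};

static void my_ctx_init(struct my_ctx *c, void (*fn)(unsigned long))
{
	init_timer(&c->timer);		/* once, when the object is set up */
	c->timer.data = (unsigned long)c;
	c->timer.function = fn;
}

static void my_ctx_arm(struct my_ctx *c, unsigned long delay)
{
	c->timer.expires = jiffies + delay;
	add_timer(&c->timer);
}

static void my_ctx_destroy(struct my_ctx *c)
{
	del_timer_sync(&c->timer);	/* safe even if the timer was never armed */
}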
@@ -219,6 +217,8 @@ enum {
*/
#define NILFS_SC_DEFAULT_WATERMARK 3600
+/* super.c */
+extern struct kmem_cache *nilfs_transaction_cachep;
/* segment.c */
extern int nilfs_init_transaction_cache(void);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 0cdbc5e..03b34b7 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -67,6 +67,11 @@ MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
"(NILFS)");
MODULE_LICENSE("GPL");
+struct kmem_cache *nilfs_inode_cachep;
+struct kmem_cache *nilfs_transaction_cachep;
+struct kmem_cache *nilfs_segbuf_cachep;
+struct kmem_cache *nilfs_btree_path_cache;
+
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
/**
@@ -129,7 +134,6 @@ void nilfs_warning(struct super_block *sb, const char *function,
va_end(args);
}
-static struct kmem_cache *nilfs_inode_cachep;
struct inode *nilfs_alloc_inode_common(struct the_nilfs *nilfs)
{
@@ -155,34 +159,6 @@ void nilfs_destroy_inode(struct inode *inode)
kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
}
-static void init_once(void *obj)
-{
- struct nilfs_inode_info *ii = obj;
-
- INIT_LIST_HEAD(&ii->i_dirty);
-#ifdef CONFIG_NILFS_XATTR
- init_rwsem(&ii->xattr_sem);
-#endif
- nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
- ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union;
- inode_init_once(&ii->vfs_inode);
-}
-
-static int nilfs_init_inode_cache(void)
-{
- nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache",
- sizeof(struct nilfs_inode_info),
- 0, SLAB_RECLAIM_ACCOUNT,
- init_once);
-
- return (nilfs_inode_cachep == NULL) ? -ENOMEM : 0;
-}
-
-static inline void nilfs_destroy_inode_cache(void)
-{
- kmem_cache_destroy(nilfs_inode_cachep);
-}
-
static void nilfs_clear_inode(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
@@ -266,8 +242,8 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
int err;
/* nilfs->sem must be locked by the caller. */
- if (sbp[0]->s_magic != NILFS_SUPER_MAGIC) {
- if (sbp[1] && sbp[1]->s_magic == NILFS_SUPER_MAGIC)
+ if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
+ if (sbp[1] && sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC))
nilfs_swap_super_block(nilfs);
else {
printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
@@ -470,10 +446,10 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (nilfs_test_opt(sbi, SNAPSHOT))
seq_printf(seq, ",cp=%llu",
(unsigned long long int)sbi->s_snapshot_cno);
- if (nilfs_test_opt(sbi, ERRORS_RO))
- seq_printf(seq, ",errors=remount-ro");
if (nilfs_test_opt(sbi, ERRORS_PANIC))
seq_printf(seq, ",errors=panic");
+ if (nilfs_test_opt(sbi, ERRORS_CONT))
+ seq_printf(seq, ",errors=continue");
if (nilfs_test_opt(sbi, STRICT_ORDER))
seq_printf(seq, ",order=strict");
if (nilfs_test_opt(sbi, NORECOVERY))
@@ -631,7 +607,7 @@ nilfs_set_default_options(struct nilfs_sb_info *sbi,
struct nilfs_super_block *sbp)
{
sbi->s_mount_opt =
- NILFS_MOUNT_ERRORS_CONT | NILFS_MOUNT_BARRIER;
+ NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
}
static int nilfs_setup_super(struct nilfs_sb_info *sbi)
@@ -749,6 +725,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
sb->s_export_op = &nilfs_export_ops;
sb->s_root = NULL;
sb->s_time_gran = 1;
+ sb->s_bdi = nilfs->ns_bdi;
err = load_nilfs(nilfs, sbi);
if (err)
@@ -777,9 +754,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
goto failed_sbi;
}
cno = sbi->s_snapshot_cno;
- } else
- /* Read-only mount */
- sbi->s_snapshot_cno = cno;
+ }
}
err = nilfs_attach_checkpoint(sbi, cno);
@@ -848,7 +823,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
struct the_nilfs *nilfs = sbi->s_nilfs;
unsigned long old_sb_flags;
struct nilfs_mount_options old_opts;
- int err;
+ int was_snapshot, err;
lock_kernel();
@@ -856,6 +831,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
old_sb_flags = sb->s_flags;
old_opts.mount_opt = sbi->s_mount_opt;
old_opts.snapshot_cno = sbi->s_snapshot_cno;
+ was_snapshot = nilfs_test_opt(sbi, SNAPSHOT);
if (!parse_options(data, sb)) {
err = -EINVAL;
@@ -863,20 +839,32 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
}
sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
- if ((*flags & MS_RDONLY) &&
- sbi->s_snapshot_cno != old_opts.snapshot_cno) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount to a different snapshot.\n",
- sb->s_id);
- err = -EINVAL;
- goto restore_opts;
+ err = -EINVAL;
+ if (was_snapshot) {
+ if (!(*flags & MS_RDONLY)) {
+ printk(KERN_ERR "NILFS (device %s): cannot remount "
+ "snapshot read/write.\n",
+ sb->s_id);
+ goto restore_opts;
+ } else if (sbi->s_snapshot_cno != old_opts.snapshot_cno) {
+ printk(KERN_ERR "NILFS (device %s): cannot "
+ "remount to a different snapshot.\n",
+ sb->s_id);
+ goto restore_opts;
+ }
+ } else {
+ if (nilfs_test_opt(sbi, SNAPSHOT)) {
+ printk(KERN_ERR "NILFS (device %s): cannot change "
+ "a regular mount to a snapshot.\n",
+ sb->s_id);
+ goto restore_opts;
+ }
}
if (!nilfs_valid_fs(nilfs)) {
printk(KERN_WARNING "NILFS (device %s): couldn't "
"remount because the filesystem is in an "
"incomplete recovery state.\n", sb->s_id);
- err = -EINVAL;
goto restore_opts;
}
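A summary of the remount transitions the new checks reject (editorial summary, not part of the patch):

/*
 *   snapshot mount -> read/write remount      : -EINVAL
 *   snapshot mount -> different snapshot cno  : -EINVAL
 *   regular mount  -> snapshot remount        : -EINVAL
 * Every other case falls through to the r/o <-> r/w transition
 * code below.
 */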
@@ -887,9 +875,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
nilfs_detach_segment_constructor(sbi);
sb->s_flags |= MS_RDONLY;
- sbi->s_snapshot_cno = nilfs_last_cno(nilfs);
- /* nilfs_set_opt(sbi, SNAPSHOT); */
-
/*
* Remounting a valid RW partition RDONLY, so set
* the RDONLY flag and then mark the partition as valid again.
@@ -908,24 +893,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
* store the current valid flag. (It may have been changed
* by fsck since we originally mounted the partition.)
*/
- if (nilfs->ns_current && nilfs->ns_current != sbi) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount because an RW-mount exists.\n",
- sb->s_id);
- err = -EBUSY;
- goto restore_opts;
- }
- if (sbi->s_snapshot_cno != nilfs_last_cno(nilfs)) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount because the current RO-mount is not "
- "the latest one.\n",
- sb->s_id);
- err = -EINVAL;
- goto restore_opts;
- }
sb->s_flags &= ~MS_RDONLY;
- nilfs_clear_opt(sbi, SNAPSHOT);
- sbi->s_snapshot_cno = 0;
err = nilfs_attach_segment_constructor(sbi);
if (err)
@@ -934,8 +902,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
down_write(&nilfs->ns_sem);
nilfs_setup_super(sbi);
up_write(&nilfs->ns_sem);
-
- nilfs->ns_current = sbi;
}
out:
up_write(&nilfs->ns_super_sem);
@@ -1021,10 +987,14 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
{
struct nilfs_super_data sd;
struct super_block *s;
+ fmode_t mode = FMODE_READ;
struct the_nilfs *nilfs;
int err, need_to_close = 1;
- sd.bdev = open_bdev_exclusive(dev_name, flags, fs_type);
+ if (!(flags & MS_RDONLY))
+ mode |= FMODE_WRITE;
+
+ sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
if (IS_ERR(sd.bdev))
return PTR_ERR(sd.bdev);
@@ -1091,10 +1061,12 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
/* New superblock instance created */
s->s_flags = flags;
+ s->s_mode = mode;
strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(sd.bdev));
- err = nilfs_fill_super(s, data, flags & MS_VERBOSE, nilfs);
+ err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0,
+ nilfs);
if (err)
goto cancel_new;
@@ -1105,7 +1077,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
mutex_unlock(&nilfs->ns_mount_mutex);
put_nilfs(nilfs);
if (need_to_close)
- close_bdev_exclusive(sd.bdev, flags);
+ close_bdev_exclusive(sd.bdev, mode);
simple_set_mnt(mnt, s);
return 0;
@@ -1113,7 +1085,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
mutex_unlock(&nilfs->ns_mount_mutex);
put_nilfs(nilfs);
failed:
- close_bdev_exclusive(sd.bdev, flags);
+ close_bdev_exclusive(sd.bdev, mode);
return err;
@@ -1123,7 +1095,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
put_nilfs(nilfs);
deactivate_locked_super(s);
/*
- * deactivate_super() invokes close_bdev_exclusive().
+ * deactivate_locked_super() invokes close_bdev_exclusive().
* We must finish all post-cleaning before this call;
* put_nilfs() needs the block device.
*/
@@ -1138,54 +1110,93 @@ struct file_system_type nilfs_fs_type = {
.fs_flags = FS_REQUIRES_DEV,
};
-static int __init init_nilfs_fs(void)
+static void nilfs_inode_init_once(void *obj)
{
- int err;
-
- err = nilfs_init_inode_cache();
- if (err)
- goto failed;
+ struct nilfs_inode_info *ii = obj;
- err = nilfs_init_transaction_cache();
- if (err)
- goto failed_inode_cache;
+ INIT_LIST_HEAD(&ii->i_dirty);
+#ifdef CONFIG_NILFS_XATTR
+ init_rwsem(&ii->xattr_sem);
+#endif
+ nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+ ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union;
+ inode_init_once(&ii->vfs_inode);
+}
- err = nilfs_init_segbuf_cache();
- if (err)
- goto failed_transaction_cache;
+static void nilfs_segbuf_init_once(void *obj)
+{
+ memset(obj, 0, sizeof(struct nilfs_segment_buffer));
+}
- err = nilfs_btree_path_cache_init();
- if (err)
- goto failed_segbuf_cache;
+static void nilfs_destroy_cachep(void)
+{
+ if (nilfs_inode_cachep)
+ kmem_cache_destroy(nilfs_inode_cachep);
+ if (nilfs_transaction_cachep)
+ kmem_cache_destroy(nilfs_transaction_cachep);
+ if (nilfs_segbuf_cachep)
+ kmem_cache_destroy(nilfs_segbuf_cachep);
+ if (nilfs_btree_path_cache)
+ kmem_cache_destroy(nilfs_btree_path_cache);
+}
- err = register_filesystem(&nilfs_fs_type);
- if (err)
- goto failed_btree_path_cache;
+static int __init nilfs_init_cachep(void)
+{
+ nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache",
+ sizeof(struct nilfs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT, nilfs_inode_init_once);
+ if (!nilfs_inode_cachep)
+ goto fail;
+
+ nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache",
+ sizeof(struct nilfs_transaction_info), 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
+ if (!nilfs_transaction_cachep)
+ goto fail;
+
+ nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache",
+ sizeof(struct nilfs_segment_buffer), 0,
+ SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once);
+ if (!nilfs_segbuf_cachep)
+ goto fail;
+
+ nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache",
+ sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX,
+ 0, 0, NULL);
+ if (!nilfs_btree_path_cache)
+ goto fail;
return 0;
- failed_btree_path_cache:
- nilfs_btree_path_cache_destroy();
+fail:
+ nilfs_destroy_cachep();
+ return -ENOMEM;
+}
+
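The consolidated cache setup above follows a common kernel pattern: each cache pointer doubles as a progress marker, so one destructor can unwind any partially completed init. A standalone sketch with placeholder names (note that kmem_cache_destroy() must not be passed NULL on this kernel, hence the guards):

struct a { int x; };
struct b { int y; };

static struct kmem_cache *a_cachep;
static struct kmem_cache *b_cachep;

static void destroy_caches(void)
{
	if (a_cachep)
		kmem_cache_destroy(a_cachep);
	if (b_cachep)
		kmem_cache_destroy(b_cachep);
}

static int __init init_caches(void)
{
	a_cachep = kmem_cache_create("a_cache", sizeof(struct a), 0,
				     SLAB_RECLAIM_ACCOUNT, NULL);
	if (!a_cachep)
		goto fail;

	b_cachep = kmem_cache_create("b_cache", sizeof(struct b), 0,
				     0, NULL);
	if (!b_cachep)
		goto fail;

	return 0;
fail:
	destroy_caches();
	return -ENOMEM;
}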
+static int __init init_nilfs_fs(void)
+{
+ int err;
- failed_segbuf_cache:
- nilfs_destroy_segbuf_cache();
+ err = nilfs_init_cachep();
+ if (err)
+ goto fail;
- failed_transaction_cache:
- nilfs_destroy_transaction_cache();
+ err = register_filesystem(&nilfs_fs_type);
+ if (err)
+ goto free_cachep;
- failed_inode_cache:
- nilfs_destroy_inode_cache();
+ printk(KERN_INFO "NILFS version 2 loaded\n");
+ return 0;
- failed:
+free_cachep:
+ nilfs_destroy_cachep();
+fail:
return err;
}
static void __exit exit_nilfs_fs(void)
{
- nilfs_destroy_segbuf_cache();
- nilfs_destroy_transaction_cache();
- nilfs_destroy_inode_cache();
- nilfs_btree_path_cache_destroy();
+ nilfs_destroy_cachep();
unregister_filesystem(&nilfs_fs_type);
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 7ffcf2b..8c10973 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -486,11 +486,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
printk(KERN_WARNING
"NILFS warning: unable to read secondary superblock\n");
+ /*
+ * Compare the two super blocks and set swp to 1 if the secondary
+ * super block is valid and newer; otherwise set swp to 0.
+ */
valid[0] = nilfs_valid_sb(sbp[0]);
valid[1] = nilfs_valid_sb(sbp[1]);
- swp = valid[1] &&
- (!valid[0] ||
- le64_to_cpu(sbp[1]->s_wtime) > le64_to_cpu(sbp[0]->s_wtime));
+ swp = valid[1] && (!valid[0] ||
+ le64_to_cpu(sbp[1]->s_last_cno) >
+ le64_to_cpu(sbp[0]->s_last_cno));
if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
brelse(sbh[1]);
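The selection rule above, isolated: prefer the secondary super block only when it is valid and records a later checkpoint number, which is monotonic, unlike s_wtime, which can move backwards under clock skew. A hedged sketch (helper name invented here):

static int nilfs_sb2_is_newer(struct nilfs_super_block **sbp,
			      int valid0, int valid1)
{
	return valid1 && (!valid0 ||
			  le64_to_cpu(sbp[1]->s_last_cno) >
			  le64_to_cpu(sbp[0]->s_last_cno));
}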
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 3e56dbf..b3a159b 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -15,6 +15,7 @@ config INOTIFY
config INOTIFY_USER
bool "Inotify support for userspace"
+ select ANON_INODES
select FSNOTIFY
default y
---help---
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 1afb0a1..e27960c 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -28,6 +28,7 @@
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
+#include <linux/sched.h>
#include "inotify.h"
@@ -146,6 +147,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
idr_for_each(&group->inotify_data.idr, idr_callback, group);
idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
+ free_uid(group->inotify_data.user);
}
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 472cdf2..e46ca68 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -546,21 +546,24 @@ retry:
if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
goto out_err;
+ /* we are putting the mark on the idr, take a reference */
+ fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
spin_lock(&group->inotify_data.idr_lock);
ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
group->inotify_data.last_wd+1,
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
+ /* we didn't get on the idr, drop the idr reference */
+ fsnotify_put_mark(&tmp_ientry->fsn_entry);
+
/* idr was out of memory allocate and try again */
if (ret == -EAGAIN)
goto retry;
goto out_err;
}
- /* we put the mark on the idr, take a reference */
- fsnotify_get_mark(&tmp_ientry->fsn_entry);
-
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
if (ret) {
@@ -578,16 +581,13 @@ retry:
/* return the watch descriptor for this new entry */
ret = tmp_ientry->wd;
- /* match the ref from fsnotify_init_markentry() */
- fsnotify_put_mark(&tmp_ientry->fsn_entry);
-
/* if this mark added a new event update the group mask */
if (mask & ~group->mask)
fsnotify_recalc_group_mask(group);
out_err:
- if (ret < 0)
- kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
+ /* match the ref from fsnotify_init_markentry() */
+ fsnotify_put_mark(&tmp_ientry->fsn_entry);
return ret;
}
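The reordering above enforces a general refcounting rule: take the reference before the object becomes visible in a shared structure (here the idr), and make the failure path drop exactly that reference. A simplified sketch -- helper name invented, and the idr_pre_get()/-EAGAIN retry loop is omitted:

static int inotify_publish_mark(struct fsnotify_group *group,
				struct fsnotify_mark_entry *entry,
				int *wd)
{
	int ret;

	fsnotify_get_mark(entry);	/* reference owned by the idr */
	spin_lock(&group->inotify_data.idr_lock);
	ret = idr_get_new_above(&group->inotify_data.idr, entry,
				group->inotify_data.last_wd + 1, wd);
	spin_unlock(&group->inotify_data.idr_lock);
	if (ret)
		fsnotify_put_mark(entry);	/* never published */
	return ret;
}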
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 791c088..07d9fd8 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -29,6 +29,7 @@ ocfs2-objs := \
mmap.o \
namei.o \
refcounttree.o \
+ reservations.o \
resize.o \
slot_map.o \
suballoc.o \
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 9f8bd91..215e12c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1006,7 +1006,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
int count, status, i;
u16 suballoc_bit_start;
u32 num_got;
- u64 first_blkno;
+ u64 suballoc_loc, first_blkno;
struct ocfs2_super *osb =
OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
struct ocfs2_extent_block *eb;
@@ -1015,10 +1015,10 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
count = 0;
while (count < wanted) {
- status = ocfs2_claim_metadata(osb,
- handle,
+ status = ocfs2_claim_metadata(handle,
meta_ac,
wanted - count,
+ &suballoc_loc,
&suballoc_bit_start,
&num_got,
&first_blkno);
@@ -1052,6 +1052,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
eb->h_suballoc_slot =
cpu_to_le16(meta_ac->ac_alloc_slot);
+ eb->h_suballoc_loc = cpu_to_le64(suballoc_loc);
eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
eb->h_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
@@ -1061,11 +1062,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
/* We'll also be dirtied by the caller, so
* this isn't absolutely necessary. */
- status = ocfs2_journal_dirty(handle, bhs[i]);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, bhs[i]);
}
count += num_got;
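Many of the ocfs2 hunks below follow from a single interface change: ocfs2_journal_dirty() no longer returns an error (a failure is handled inside, by aborting the journal), so the dirty-then-check idiom collapses to one call. A hypothetical caller under the new convention:

static int update_block(handle_t *handle, struct ocfs2_caching_info *ci,
			struct buffer_head *bh)
{
	int ret;

	ret = ocfs2_journal_access(handle, ci, bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/* modify bh->b_data here */

	ocfs2_journal_dirty(handle, bh);	/* void: nothing to check */
	return 0;
}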
@@ -1129,8 +1126,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
goto out;
}
- status = ocfs2_extend_trans(handle, path_num_items(path) +
- handle->h_buffer_credits);
+ status = ocfs2_extend_trans(handle, path_num_items(path));
if (status < 0) {
mlog_errno(status);
goto out;
@@ -1270,12 +1266,7 @@ static int ocfs2_add_branch(handle_t *handle,
if (!eb_el->l_tree_depth)
new_last_eb_blk = le64_to_cpu(eb->h_blkno);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
+ ocfs2_journal_dirty(handle, bh);
next_blkno = le64_to_cpu(eb->h_blkno);
}
@@ -1321,17 +1312,10 @@ static int ocfs2_add_branch(handle_t *handle,
eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
- status = ocfs2_journal_dirty(handle, *last_eb_bh);
- if (status < 0)
- mlog_errno(status);
- status = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (status < 0)
- mlog_errno(status);
- if (eb_bh) {
- status = ocfs2_journal_dirty(handle, eb_bh);
- if (status < 0)
- mlog_errno(status);
- }
+ ocfs2_journal_dirty(handle, *last_eb_bh);
+ ocfs2_journal_dirty(handle, et->et_root_bh);
+ if (eb_bh)
+ ocfs2_journal_dirty(handle, eb_bh);
/*
* Some callers want to track the rightmost leaf so pass it
@@ -1399,11 +1383,7 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
eb_el->l_recs[i] = root_el->l_recs[i];
- status = ocfs2_journal_dirty(handle, new_eb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, new_eb_bh);
status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1428,11 +1408,7 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
if (root_el->l_tree_depth == cpu_to_le16(1))
ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
- status = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, et->et_root_bh);
*ret_new_eb_bh = new_eb_bh;
new_eb_bh = NULL;
@@ -2064,7 +2040,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
struct ocfs2_path *right_path,
int subtree_index)
{
- int ret, i, idx;
+ int i, idx;
struct ocfs2_extent_list *el, *left_el, *right_el;
struct ocfs2_extent_rec *left_rec, *right_rec;
struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
@@ -2102,13 +2078,8 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
right_el);
- ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
- if (ret)
- mlog_errno(ret);
-
- ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
+ ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
/*
* Setup our list pointers now so that the current
@@ -2132,9 +2103,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
root_bh = left_path->p_node[subtree_index].bh;
- ret = ocfs2_journal_dirty(handle, root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, root_bh);
}
static int ocfs2_rotate_subtree_right(handle_t *handle,
@@ -2207,11 +2176,7 @@ static int ocfs2_rotate_subtree_right(handle_t *handle,
ocfs2_create_empty_extent(right_el);
- ret = ocfs2_journal_dirty(handle, right_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, right_leaf_bh);
/* Do the copy now. */
i = le16_to_cpu(left_el->l_next_free_rec) - 1;
@@ -2230,11 +2195,7 @@ static int ocfs2_rotate_subtree_right(handle_t *handle,
memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
le16_add_cpu(&left_el->l_next_free_rec, 1);
- ret = ocfs2_journal_dirty(handle, left_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, left_leaf_bh);
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
@@ -2249,8 +2210,8 @@ out:
*
* Will return zero if the path passed in is already the leftmost path.
*/
-static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
- struct ocfs2_path *path, u32 *cpos)
+int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
+ struct ocfs2_path *path, u32 *cpos)
{
int i, j, ret = 0;
u64 blkno;
@@ -2327,20 +2288,14 @@ static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
int op_credits,
struct ocfs2_path *path)
{
- int ret;
+ int ret = 0;
int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
- if (handle->h_buffer_credits < credits) {
+ if (handle->h_buffer_credits < credits)
ret = ocfs2_extend_trans(handle,
credits - handle->h_buffer_credits);
- if (ret)
- return ret;
- if (unlikely(handle->h_buffer_credits < credits))
- return ocfs2_extend_trans(handle, credits);
- }
-
- return 0;
+ return ret;
}
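Another interface change visible in several hunks: ocfs2_extend_trans() now takes the number of additional credits rather than a new total, so callers stop adding handle->h_buffer_credits themselves. A hypothetical caller following the new convention:

static int ensure_credits(handle_t *handle, int needed)
{
	if (handle->h_buffer_credits >= needed)
		return 0;

	/* request only what is still missing, not a new total */
	return ocfs2_extend_trans(handle,
				  needed - handle->h_buffer_credits);
}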
/*
@@ -2584,8 +2539,7 @@ static int ocfs2_update_edge_lengths(handle_t *handle,
* records for all the bh in the path.
* So we have to allocate extra credits and access them.
*/
- ret = ocfs2_extend_trans(handle,
- handle->h_buffer_credits + subtree_index);
+ ret = ocfs2_extend_trans(handle, subtree_index);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2823,12 +2777,8 @@ static int ocfs2_rotate_subtree_left(handle_t *handle,
ocfs2_remove_empty_extent(right_leaf_el);
}
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
- if (ret)
- mlog_errno(ret);
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
+ ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
if (del_right_subtree) {
ocfs2_unlink_subtree(handle, et, left_path, right_path,
@@ -2851,9 +2801,7 @@ static int ocfs2_rotate_subtree_left(handle_t *handle,
if (right_has_empty)
ocfs2_remove_empty_extent(left_leaf_el);
- ret = ocfs2_journal_dirty(handle, et_root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, et_root_bh);
*deleted = 1;
} else
@@ -2962,10 +2910,7 @@ static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle,
}
ocfs2_remove_empty_extent(el);
-
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, bh);
out:
return ret;
@@ -3506,15 +3451,9 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
ocfs2_cleanup_merge(el, index);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, bh);
if (right_path) {
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
}
@@ -3683,14 +3622,9 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
ocfs2_cleanup_merge(el, index);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, bh);
if (left_path) {
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
/*
* In the situation that the right_rec is empty and the extent
@@ -4016,10 +3950,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
le32_add_cpu(&rec->e_int_clusters,
-le32_to_cpu(rec->e_cpos));
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, bh);
}
}
@@ -4203,17 +4134,13 @@ static int ocfs2_insert_path(handle_t *handle,
struct buffer_head *leaf_bh = path_leaf_bh(right_path);
if (left_path) {
- int credits = handle->h_buffer_credits;
-
/*
* There's a chance that left_path got passed back to
* us without being accounted for in the
* journal. Extend our transaction here to be sure we
* can change those blocks.
*/
- credits += left_path->p_tree_depth;
-
- ret = ocfs2_extend_trans(handle, credits);
+ ret = ocfs2_extend_trans(handle, left_path->p_tree_depth);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -4251,17 +4178,13 @@ static int ocfs2_insert_path(handle_t *handle,
* dirty this for us.
*/
if (left_path)
- ret = ocfs2_journal_dirty(handle,
- path_leaf_bh(left_path));
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle,
+ path_leaf_bh(left_path));
} else
ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path),
insert);
- ret = ocfs2_journal_dirty(handle, leaf_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, leaf_bh);
if (left_path) {
/*
@@ -4384,9 +4307,7 @@ out_update_clusters:
ocfs2_et_update_clusters(et,
le16_to_cpu(insert_rec->e_leaf_clusters));
- ret = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, et->et_root_bh);
out:
ocfs2_free_path(left_path);
@@ -4866,7 +4787,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
goto leave;
}
- status = __ocfs2_claim_clusters(osb, handle, data_ac, 1,
+ status = __ocfs2_claim_clusters(handle, data_ac, 1,
clusters_to_add, &bit_off, &num_bits);
if (status < 0) {
if (status != -ENOSPC)
@@ -4895,11 +4816,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
goto leave;
}
- status = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, et->et_root_bh);
clusters_to_add -= num_bits;
*logical_offset += num_bits;
@@ -5309,7 +5226,7 @@ static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
int index, u32 new_range,
struct ocfs2_alloc_context *meta_ac)
{
- int ret, depth, credits = handle->h_buffer_credits;
+ int ret, depth, credits;
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *rightmost_el, *el;
@@ -5340,8 +5257,8 @@ static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
} else
rightmost_el = path_leaf_el(path);
- credits += path->p_tree_depth +
- ocfs2_extend_meta_needed(et->et_root_el);
+ credits = path->p_tree_depth +
+ ocfs2_extend_meta_needed(et->et_root_el);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
@@ -5671,19 +5588,97 @@ out:
return ret;
}
+/*
+ * ocfs2_reserve_blocks_for_rec_trunc() looks basically the
+ * same as ocfs2_lock_allocators(), except that it accepts a
+ * count of extra blocks to reserve, and it only handles
+ * metadata allocations.
+ *
+ * Currently, only ocfs2_remove_btree_range() uses it for truncating
+ * and punching holes.
+ */
+static int ocfs2_reserve_blocks_for_rec_trunc(struct inode *inode,
+ struct ocfs2_extent_tree *et,
+ u32 extents_to_split,
+ struct ocfs2_alloc_context **ac,
+ int extra_blocks)
+{
+ int ret = 0, num_free_extents;
+ unsigned int max_recs_needed = 2 * extents_to_split;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ *ac = NULL;
+
+ num_free_extents = ocfs2_num_free_extents(osb, et);
+ if (num_free_extents < 0) {
+ ret = num_free_extents;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (!num_free_extents ||
+ (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
+ extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
+
+ if (extra_blocks) {
+ ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, ac);
+ if (ret < 0) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+out:
+ if (ret) {
+ if (*ac) {
+ ocfs2_free_alloc_context(*ac);
+ *ac = NULL;
+ }
+ }
+
+ return ret;
+}
+
int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_extent_tree *et,
- u32 cpos, u32 phys_cpos, u32 len,
- struct ocfs2_cached_dealloc_ctxt *dealloc)
+ u32 cpos, u32 phys_cpos, u32 len, int flags,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ u64 refcount_loc)
{
- int ret;
+ int ret, credits = 0, extra_blocks = 0;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
handle_t *handle;
struct ocfs2_alloc_context *meta_ac = NULL;
+ struct ocfs2_refcount_tree *ref_tree = NULL;
+
+ if ((flags & OCFS2_EXT_REFCOUNTED) && len) {
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
+ OCFS2_HAS_REFCOUNT_FL));
+
+ ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
+ &ref_tree, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
- ret = ocfs2_lock_allocators(inode, et, 0, 1, NULL, &meta_ac);
+ ret = ocfs2_prepare_refcount_change_for_del(inode,
+ refcount_loc,
+ phys_blkno,
+ len,
+ &credits,
+ &extra_blocks);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ret = ocfs2_reserve_blocks_for_rec_trunc(inode, et, 1, &meta_ac,
+ extra_blocks);
if (ret) {
mlog_errno(ret);
return ret;
@@ -5699,7 +5694,8 @@ int ocfs2_remove_btree_range(struct inode *inode,
}
}
- handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb));
+ handle = ocfs2_start_trans(osb,
+ ocfs2_remove_extent_credits(osb->sb) + credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
@@ -5724,15 +5720,22 @@ int ocfs2_remove_btree_range(struct inode *inode,
ocfs2_et_update_clusters(et, -len);
- ret = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, et->et_root_bh);
- ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len);
- if (ret)
- mlog_errno(ret);
+ if (phys_blkno) {
+ if (flags & OCFS2_EXT_REFCOUNTED)
+ ret = ocfs2_decrease_refcount(inode, handle,
+ ocfs2_blocks_to_clusters(osb->sb,
+ phys_blkno),
+ len, meta_ac,
+ dealloc, 1);
+ else
+ ret = ocfs2_truncate_log_append(osb, handle,
+ phys_blkno, len);
+ if (ret)
+ mlog_errno(ret);
+
+ }
out_commit:
ocfs2_commit_trans(osb, handle);
@@ -5742,6 +5745,9 @@ out:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
+ if (ref_tree)
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+
return ret;
}
@@ -5850,11 +5856,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
}
tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
- status = ocfs2_journal_dirty(handle, tl_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, tl_bh);
bail:
mlog_exit(status);
@@ -5893,11 +5895,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
tl->tl_used = cpu_to_le16(i);
- status = ocfs2_journal_dirty(handle, tl_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, tl_bh);
/* TODO: Perhaps we can calculate the bulk of the
* credits up front rather than extending like
@@ -6298,6 +6296,7 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
*/
struct ocfs2_cached_block_free {
struct ocfs2_cached_block_free *free_next;
+ u64 free_bg;
u64 free_blk;
unsigned int free_bit;
};
@@ -6344,8 +6343,11 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
}
while (head) {
- bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
- head->free_bit);
+ if (head->free_bg)
+ bg_blkno = head->free_bg;
+ else
+ bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
+ head->free_bit);
mlog(0, "Free bit: (bit %u, blkno %llu)\n",
head->free_bit, (unsigned long long)head->free_blk);
@@ -6393,7 +6395,7 @@ int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
int ret = 0;
struct ocfs2_cached_block_free *item;
- item = kmalloc(sizeof(*item), GFP_NOFS);
+ item = kzalloc(sizeof(*item), GFP_NOFS);
if (item == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
@@ -6533,8 +6535,8 @@ ocfs2_find_per_slot_free_list(int type,
}
int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
- int type, int slot, u64 blkno,
- unsigned int bit)
+ int type, int slot, u64 suballoc,
+ u64 blkno, unsigned int bit)
{
int ret;
struct ocfs2_per_slot_free_list *fl;
@@ -6547,7 +6549,7 @@ int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
goto out;
}
- item = kmalloc(sizeof(*item), GFP_NOFS);
+ item = kzalloc(sizeof(*item), GFP_NOFS);
if (item == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
@@ -6557,6 +6559,7 @@ int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n",
type, slot, bit, (unsigned long long)blkno);
+ item->free_bg = suballoc;
item->free_blk = blkno;
item->free_bit = bit;
item->free_next = fl->f_first;
@@ -6573,433 +6576,11 @@ static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
{
return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
le16_to_cpu(eb->h_suballoc_slot),
+ le64_to_cpu(eb->h_suballoc_loc),
le64_to_cpu(eb->h_blkno),
le16_to_cpu(eb->h_suballoc_bit));
}
-/* This function will figure out whether the currently last extent
- * block will be deleted, and if it will, what the new last extent
- * block will be so we can update its h_next_leaf_blk field, as well
- * as the dinodes i_last_eb_blk */
-static int ocfs2_find_new_last_ext_blk(struct inode *inode,
- unsigned int clusters_to_del,
- struct ocfs2_path *path,
- struct buffer_head **new_last_eb)
-{
- int next_free, ret = 0;
- u32 cpos;
- struct ocfs2_extent_rec *rec;
- struct ocfs2_extent_block *eb;
- struct ocfs2_extent_list *el;
- struct buffer_head *bh = NULL;
-
- *new_last_eb = NULL;
-
- /* we have no tree, so of course, no last_eb. */
- if (!path->p_tree_depth)
- goto out;
-
- /* trunc to zero special case - this makes tree_depth = 0
- * regardless of what it is. */
- if (OCFS2_I(inode)->ip_clusters == clusters_to_del)
- goto out;
-
- el = path_leaf_el(path);
- BUG_ON(!el->l_next_free_rec);
-
- /*
- * Make sure that this extent list will actually be empty
- * after we clear away the data. We can shortcut out if
- * there's more than one non-empty extent in the
- * list. Otherwise, a check of the remaining extent is
- * necessary.
- */
- next_free = le16_to_cpu(el->l_next_free_rec);
- rec = NULL;
- if (ocfs2_is_empty_extent(&el->l_recs[0])) {
- if (next_free > 2)
- goto out;
-
- /* We may have a valid extent in index 1, check it. */
- if (next_free == 2)
- rec = &el->l_recs[1];
-
- /*
- * Fall through - no more nonempty extents, so we want
- * to delete this leaf.
- */
- } else {
- if (next_free > 1)
- goto out;
-
- rec = &el->l_recs[0];
- }
-
- if (rec) {
- /*
- * Check if we'll only be trimming off the end of this
- * cluster.
- */
- if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del)
- goto out;
- }
-
- ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- ret = ocfs2_find_leaf(INODE_CACHE(inode), path_root_el(path), cpos, &bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- eb = (struct ocfs2_extent_block *) bh->b_data;
- el = &eb->h_list;
-
- /* ocfs2_find_leaf() gets the eb from ocfs2_read_extent_block().
- * Any corruption is a code bug. */
- BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
-
- *new_last_eb = bh;
- get_bh(*new_last_eb);
- mlog(0, "returning block %llu, (cpos: %u)\n",
- (unsigned long long)le64_to_cpu(eb->h_blkno), cpos);
-out:
- brelse(bh);
-
- return ret;
-}
-
-/*
- * Trim some clusters off the rightmost edge of a tree. Only called
- * during truncate.
- *
- * The caller needs to:
- * - start journaling of each path component.
- * - compute and fully set up any new last ext block
- */
-static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
- handle_t *handle, struct ocfs2_truncate_context *tc,
- u32 clusters_to_del, u64 *delete_start, u8 *flags)
-{
- int ret, i, index = path->p_tree_depth;
- u32 new_edge = 0;
- u64 deleted_eb = 0;
- struct buffer_head *bh;
- struct ocfs2_extent_list *el;
- struct ocfs2_extent_rec *rec;
-
- *delete_start = 0;
- *flags = 0;
-
- while (index >= 0) {
- bh = path->p_node[index].bh;
- el = path->p_node[index].el;
-
- mlog(0, "traveling tree (index = %d, block = %llu)\n",
- index, (unsigned long long)bh->b_blocknr);
-
- BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
-
- if (index !=
- (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) {
- ocfs2_error(inode->i_sb,
- "Inode %lu has invalid ext. block %llu",
- inode->i_ino,
- (unsigned long long)bh->b_blocknr);
- ret = -EROFS;
- goto out;
- }
-
-find_tail_record:
- i = le16_to_cpu(el->l_next_free_rec) - 1;
- rec = &el->l_recs[i];
-
- mlog(0, "Extent list before: record %d: (%u, %u, %llu), "
- "next = %u\n", i, le32_to_cpu(rec->e_cpos),
- ocfs2_rec_clusters(el, rec),
- (unsigned long long)le64_to_cpu(rec->e_blkno),
- le16_to_cpu(el->l_next_free_rec));
-
- BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del);
-
- if (le16_to_cpu(el->l_tree_depth) == 0) {
- /*
- * If the leaf block contains a single empty
- * extent and no records, we can just remove
- * the block.
- */
- if (i == 0 && ocfs2_is_empty_extent(rec)) {
- memset(rec, 0,
- sizeof(struct ocfs2_extent_rec));
- el->l_next_free_rec = cpu_to_le16(0);
-
- goto delete;
- }
-
- /*
- * Remove any empty extents by shifting things
- * left. That should make life much easier on
- * the code below. This condition is rare
- * enough that we shouldn't see a performance
- * hit.
- */
- if (ocfs2_is_empty_extent(&el->l_recs[0])) {
- le16_add_cpu(&el->l_next_free_rec, -1);
-
- for(i = 0;
- i < le16_to_cpu(el->l_next_free_rec); i++)
- el->l_recs[i] = el->l_recs[i + 1];
-
- memset(&el->l_recs[i], 0,
- sizeof(struct ocfs2_extent_rec));
-
- /*
- * We've modified our extent list. The
- * simplest way to handle this change
- * is to begin the search from the
- * start again.
- */
- goto find_tail_record;
- }
-
- le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del);
-
- /*
- * We'll use "new_edge" on our way back up the
- * tree to know what our rightmost cpos is.
- */
- new_edge = le16_to_cpu(rec->e_leaf_clusters);
- new_edge += le32_to_cpu(rec->e_cpos);
-
- /*
- * The caller will use this to delete data blocks.
- */
- *delete_start = le64_to_cpu(rec->e_blkno)
- + ocfs2_clusters_to_blocks(inode->i_sb,
- le16_to_cpu(rec->e_leaf_clusters));
- *flags = rec->e_flags;
-
- /*
- * If it's now empty, remove this record.
- */
- if (le16_to_cpu(rec->e_leaf_clusters) == 0) {
- memset(rec, 0,
- sizeof(struct ocfs2_extent_rec));
- le16_add_cpu(&el->l_next_free_rec, -1);
- }
- } else {
- if (le64_to_cpu(rec->e_blkno) == deleted_eb) {
- memset(rec, 0,
- sizeof(struct ocfs2_extent_rec));
- le16_add_cpu(&el->l_next_free_rec, -1);
-
- goto delete;
- }
-
- /* Can this actually happen? */
- if (le16_to_cpu(el->l_next_free_rec) == 0)
- goto delete;
-
- /*
- * We never actually deleted any clusters
- * because our leaf was empty. There's no
- * reason to adjust the rightmost edge then.
- */
- if (new_edge == 0)
- goto delete;
-
- rec->e_int_clusters = cpu_to_le32(new_edge);
- le32_add_cpu(&rec->e_int_clusters,
- -le32_to_cpu(rec->e_cpos));
-
- /*
- * A deleted child record should have been
- * caught above.
- */
- BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0);
- }
-
-delete:
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- mlog(0, "extent list container %llu, after: record %d: "
- "(%u, %u, %llu), next = %u.\n",
- (unsigned long long)bh->b_blocknr, i,
- le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec),
- (unsigned long long)le64_to_cpu(rec->e_blkno),
- le16_to_cpu(el->l_next_free_rec));
-
- /*
- * We must be careful to only attempt delete of an
- * extent block (and not the root inode block).
- */
- if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) {
- struct ocfs2_extent_block *eb =
- (struct ocfs2_extent_block *)bh->b_data;
-
- /*
- * Save this for use when processing the
- * parent block.
- */
- deleted_eb = le64_to_cpu(eb->h_blkno);
-
- mlog(0, "deleting this extent block.\n");
-
- ocfs2_remove_from_cache(INODE_CACHE(inode), bh);
-
- BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
- BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
- BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno));
-
- ret = ocfs2_cache_extent_block_free(&tc->tc_dealloc, eb);
- /* An error here is not fatal. */
- if (ret < 0)
- mlog_errno(ret);
- } else {
- deleted_eb = 0;
- }
-
- index--;
- }
-
- ret = 0;
-out:
- return ret;
-}
-
-static int ocfs2_do_truncate(struct ocfs2_super *osb,
- unsigned int clusters_to_del,
- struct inode *inode,
- struct buffer_head *fe_bh,
- handle_t *handle,
- struct ocfs2_truncate_context *tc,
- struct ocfs2_path *path,
- struct ocfs2_alloc_context *meta_ac)
-{
- int status;
- struct ocfs2_dinode *fe;
- struct ocfs2_extent_block *last_eb = NULL;
- struct ocfs2_extent_list *el;
- struct buffer_head *last_eb_bh = NULL;
- u64 delete_blk = 0;
- u8 rec_flags;
-
- fe = (struct ocfs2_dinode *) fe_bh->b_data;
-
- status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del,
- path, &last_eb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- /*
- * Each component will be touched, so we might as well journal
- * here to avoid having to handle errors later.
- */
- status = ocfs2_journal_access_path(INODE_CACHE(inode), handle, path);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- if (last_eb_bh) {
- status = ocfs2_journal_access_eb(handle, INODE_CACHE(inode), last_eb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
- }
-
- el = &(fe->id2.i_list);
-
- /*
- * Lower levels depend on this never happening, but it's best
- * to check it up here before changing the tree.
- */
- if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) {
- ocfs2_error(inode->i_sb,
- "Inode %lu has an empty extent record, depth %u\n",
- inode->i_ino, le16_to_cpu(el->l_tree_depth));
- status = -EROFS;
- goto bail;
- }
-
- dquot_free_space_nodirty(inode,
- ocfs2_clusters_to_bytes(osb->sb, clusters_to_del));
- spin_lock(&OCFS2_I(inode)->ip_lock);
- OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
- clusters_to_del;
- spin_unlock(&OCFS2_I(inode)->ip_lock);
- le32_add_cpu(&fe->i_clusters, -clusters_to_del);
- inode->i_blocks = ocfs2_inode_sector_count(inode);
-
- status = ocfs2_trim_tree(inode, path, handle, tc,
- clusters_to_del, &delete_blk, &rec_flags);
- if (status) {
- mlog_errno(status);
- goto bail;
- }
-
- if (le32_to_cpu(fe->i_clusters) == 0) {
- /* trunc to zero is a special case. */
- el->l_tree_depth = 0;
- fe->i_last_eb_blk = 0;
- } else if (last_eb)
- fe->i_last_eb_blk = last_eb->h_blkno;
-
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- if (last_eb) {
- /* If there will be a new last extent block, then by
- * definition, there cannot be any leaves to the right of
- * him. */
- last_eb->h_next_leaf_blk = 0;
- status = ocfs2_journal_dirty(handle, last_eb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- }
-
- if (delete_blk) {
- if (rec_flags & OCFS2_EXT_REFCOUNTED)
- status = ocfs2_decrease_refcount(inode, handle,
- ocfs2_blocks_to_clusters(osb->sb,
- delete_blk),
- clusters_to_del, meta_ac,
- &tc->tc_dealloc, 1);
- else
- status = ocfs2_truncate_log_append(osb, handle,
- delete_blk,
- clusters_to_del);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- }
- status = 0;
-bail:
- brelse(last_eb_bh);
- mlog_exit(status);
- return status;
-}
-
static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
{
set_buffer_uptodate(bh);
@@ -7307,7 +6888,9 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
goto out_commit;
did_quota = 1;
- ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
+ data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
+
+ ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
&num);
if (ret) {
mlog_errno(ret);
@@ -7406,26 +6989,29 @@ out:
*/
int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_truncate_context *tc)
+ struct buffer_head *di_bh)
{
- int status, i, credits, tl_sem = 0;
- u32 clusters_to_del, new_highest_cpos, range;
+ int status = 0, i, flags = 0;
+ u32 new_highest_cpos, range, trunc_cpos, trunc_len, phys_cpos, coff;
u64 blkno = 0;
struct ocfs2_extent_list *el;
- handle_t *handle = NULL;
- struct inode *tl_inode = osb->osb_tl_inode;
+ struct ocfs2_extent_rec *rec;
struct ocfs2_path *path = NULL;
- struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
- struct ocfs2_alloc_context *meta_ac = NULL;
- struct ocfs2_refcount_tree *ref_tree = NULL;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_extent_list *root_el = &(di->id2.i_list);
+ u64 refcount_loc = le64_to_cpu(di->i_refcount_loc);
+ struct ocfs2_extent_tree et;
+ struct ocfs2_cached_dealloc_ctxt dealloc;
mlog_entry_void();
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
+ ocfs2_init_dealloc_ctxt(&dealloc);
+
new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb,
i_size_read(inode));
- path = ocfs2_new_path(fe_bh, &di->id2.i_list,
+ path = ocfs2_new_path(di_bh, &di->id2.i_list,
ocfs2_journal_access_di);
if (!path) {
status = -ENOMEM;
@@ -7444,8 +7030,6 @@ start:
goto bail;
}
- credits = 0;
-
/*
* Truncate always works against the rightmost tree branch.
*/
@@ -7480,101 +7064,62 @@ start:
}
i = le16_to_cpu(el->l_next_free_rec) - 1;
- range = le32_to_cpu(el->l_recs[i].e_cpos) +
- ocfs2_rec_clusters(el, &el->l_recs[i]);
- if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) {
- clusters_to_del = 0;
- } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
- clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
- blkno = le64_to_cpu(el->l_recs[i].e_blkno);
+ rec = &el->l_recs[i];
+ flags = rec->e_flags;
+ range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
+
+ if (i == 0 && ocfs2_is_empty_extent(rec)) {
+ /*
+ * Lower levels depend on this never happening, but it's best
+ * to check it up here before changing the tree.
+ */
+ if (root_el->l_tree_depth && rec->e_int_clusters == 0) {
+ ocfs2_error(inode->i_sb, "Inode %lu has an empty "
+ "extent record, depth %u\n", inode->i_ino,
+ le16_to_cpu(root_el->l_tree_depth));
+ status = -EROFS;
+ goto bail;
+ }
+ trunc_cpos = le32_to_cpu(rec->e_cpos);
+ trunc_len = 0;
+ blkno = 0;
+ } else if (le32_to_cpu(rec->e_cpos) >= new_highest_cpos) {
+ /*
+ * Truncate entire record.
+ */
+ trunc_cpos = le32_to_cpu(rec->e_cpos);
+ trunc_len = ocfs2_rec_clusters(el, rec);
+ blkno = le64_to_cpu(rec->e_blkno);
} else if (range > new_highest_cpos) {
- clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
- le32_to_cpu(el->l_recs[i].e_cpos)) -
- new_highest_cpos;
- blkno = le64_to_cpu(el->l_recs[i].e_blkno) +
- ocfs2_clusters_to_blocks(inode->i_sb,
- ocfs2_rec_clusters(el, &el->l_recs[i]) -
- clusters_to_del);
+ /*
+ * Partial truncate. It should also be
+ * the last truncate we're doing.
+ */
+ trunc_cpos = new_highest_cpos;
+ trunc_len = range - new_highest_cpos;
+ coff = new_highest_cpos - le32_to_cpu(rec->e_cpos);
+ blkno = le64_to_cpu(rec->e_blkno) +
+ ocfs2_clusters_to_blocks(inode->i_sb, coff);
} else {
+ /*
+ * Truncate completed, leave happily.
+ */
status = 0;
goto bail;
}
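A worked example of the three-way split above, with hypothetical numbers:

/*
 * Suppose new_highest_cpos = 10 and the rightmost record has
 * e_cpos = 8 with 4 clusters, so range = 12.  Then
 * range (12) > new_highest_cpos (10), a partial truncate:
 *   trunc_cpos = 10, trunc_len = 12 - 10 = 2,
 *   coff = 10 - 8 = 2,
 *   blkno = e_blkno + ocfs2_clusters_to_blocks(sb, 2).
 * With e_cpos = 12 instead, the whole record would be truncated;
 * with range <= 10, the truncate would already be complete.
 */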
- mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n",
- clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr);
-
- if (el->l_recs[i].e_flags & OCFS2_EXT_REFCOUNTED && clusters_to_del) {
- BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
- OCFS2_HAS_REFCOUNT_FL));
-
- status = ocfs2_lock_refcount_tree(osb,
- le64_to_cpu(di->i_refcount_loc),
- 1, &ref_tree, NULL);
- if (status) {
- mlog_errno(status);
- goto bail;
- }
-
- status = ocfs2_prepare_refcount_change_for_del(inode, fe_bh,
- blkno,
- clusters_to_del,
- &credits,
- &meta_ac);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- }
-
- mutex_lock(&tl_inode->i_mutex);
- tl_sem = 1;
- /* ocfs2_truncate_log_needs_flush guarantees us at least one
- * record is free for use. If there isn't any, we flush to get
- * an empty truncate log. */
- if (ocfs2_truncate_log_needs_flush(osb)) {
- status = __ocfs2_flush_truncate_log(osb);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- }
+ phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
- credits += ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
- (struct ocfs2_dinode *)fe_bh->b_data,
- el);
- handle = ocfs2_start_trans(osb, credits);
- if (IS_ERR(handle)) {
- status = PTR_ERR(handle);
- handle = NULL;
- mlog_errno(status);
- goto bail;
- }
-
- status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle,
- tc, path, meta_ac);
+ status = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
+ phys_cpos, trunc_len, flags, &dealloc,
+ refcount_loc);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- mutex_unlock(&tl_inode->i_mutex);
- tl_sem = 0;
-
- ocfs2_commit_trans(osb, handle);
- handle = NULL;
-
ocfs2_reinit_path(path, 1);
- if (meta_ac) {
- ocfs2_free_alloc_context(meta_ac);
- meta_ac = NULL;
- }
-
- if (ref_tree) {
- ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
- ref_tree = NULL;
- }
-
/*
* The check above will catch the case where we've truncated
* away all allocation.
@@ -7585,25 +7130,10 @@ bail:
ocfs2_schedule_truncate_log_flush(osb, 1);
- if (tl_sem)
- mutex_unlock(&tl_inode->i_mutex);
-
- if (handle)
- ocfs2_commit_trans(osb, handle);
-
- if (meta_ac)
- ocfs2_free_alloc_context(meta_ac);
-
- if (ref_tree)
- ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
-
- ocfs2_run_deallocs(osb, &tc->tc_dealloc);
+ ocfs2_run_deallocs(osb, &dealloc);
ocfs2_free_path(path);
- /* This will drop the ext_alloc cluster lock for us */
- ocfs2_free_truncate_context(tc);
-
mlog_exit(status);
return status;
}
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 1db4359..55762b5 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -140,8 +140,9 @@ int ocfs2_remove_extent(handle_t *handle, struct ocfs2_extent_tree *et,
struct ocfs2_cached_dealloc_ctxt *dealloc);
int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_extent_tree *et,
- u32 cpos, u32 phys_cpos, u32 len,
- struct ocfs2_cached_dealloc_ctxt *dealloc);
+ u32 cpos, u32 phys_cpos, u32 len, int flags,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ u64 refcount_loc);
int ocfs2_num_free_extents(struct ocfs2_super *osb,
struct ocfs2_extent_tree *et);
@@ -209,7 +210,7 @@ static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c)
int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
u64 blkno, unsigned int bit);
int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
- int type, int slot, u64 blkno,
+ int type, int slot, u64 suballoc, u64 blkno,
unsigned int bit);
static inline int ocfs2_dealloc_has_cluster(struct ocfs2_cached_dealloc_ctxt *c)
{
@@ -233,8 +234,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
struct ocfs2_truncate_context **tc);
int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_truncate_context *tc);
+ struct buffer_head *di_bh);
int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
unsigned int start, unsigned int end, int trunc);
@@ -319,6 +319,8 @@ int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
struct ocfs2_path *path);
int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
struct ocfs2_path *path, u32 *cpos);
+int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
+ struct ocfs2_path *path, u32 *cpos);
int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
struct ocfs2_path *left,
struct ocfs2_path *right);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 21441dd..3623ca2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1735,6 +1735,9 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
goto out;
}
+ if (data_ac)
+ data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
+
credits = ocfs2_calc_extend_credits(inode->i_sb,
&di->id2.i_list,
clusters_to_alloc);
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index ecebb22..f9d5d3f 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -406,6 +406,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
struct buffer_head *bh)
{
int ret = 0;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
mlog_entry_void();
@@ -425,6 +426,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
get_bh(bh); /* for end_buffer_write_sync() */
bh->b_end_io = end_buffer_write_sync;
+ ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
submit_bh(WRITE, bh);
wait_on_buffer(bh);
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 3bb928a..c7fba39 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -116,6 +116,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(ERROR),
define_mask(NOTICE),
define_mask(KTHREAD),
+ define_mask(RESERVATIONS),
};
static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 3dfddbe..fd96e2a 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -119,6 +119,7 @@
#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
#define ML_KTHREAD 0x0000000400000000ULL /* kernel thread activity */
+#define ML_RESERVATIONS 0x0000000800000000ULL /* ocfs2 alloc reservations */
#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
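
The new ML_RESERVATIONS bit follows the existing masklog pattern: each subsystem gets one bit in a 64-bit mask, and a message is emitted only when its bit is enabled at runtime. A standalone sketch of that filtering idea (simplified; the real mlog() also carries allow/deny masks and file/line information):

#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>

#define ML_ERROR        0x0000000100000000ULL
#define ML_NOTICE       0x0000000200000000ULL
#define ML_RESERVATIONS 0x0000000800000000ULL

/* Runtime-settable mask, like the masklog attributes under sysfs. */
static uint64_t enabled_mask = ML_ERROR | ML_NOTICE;

static void mlog(uint64_t mask, const char *fmt, ...)
{
	va_list ap;

	if (!(mask & enabled_mask))
		return;		/* bit not enabled: message filtered out */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	mlog(ML_RESERVATIONS, "suppressed: reservations logging off\n");
	enabled_mask |= ML_RESERVATIONS;
	mlog(ML_RESERVATIONS, "printed: reservations logging on\n");
	return 0;
}
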
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 73e743e..aa75ca3 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -583,6 +583,9 @@ static void o2net_state_change(struct sock *sk)
o2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
+ printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
+ " shutdown, state %d\n",
+ SC_NODEF_ARGS(sc), sk->sk_state);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
break;
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index efd77d0..f04ebcf 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1194,7 +1194,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
else
de->inode = 0;
dir->i_version++;
- status = ocfs2_journal_dirty(handle, bh);
+ ocfs2_journal_dirty(handle, bh);
goto bail;
}
i += le16_to_cpu(de->rec_len);
@@ -1752,7 +1752,7 @@ int __ocfs2_add_entry(handle_t *handle,
ocfs2_recalc_free_list(dir, handle, lookup);
dir->i_version++;
- status = ocfs2_journal_dirty(handle, insert_bh);
+ ocfs2_journal_dirty(handle, insert_bh);
retval = 0;
goto bail;
}
@@ -2297,12 +2297,7 @@ static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
}
ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
-
ocfs2_journal_dirty(handle, di_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
i_size_write(inode, size);
inode->i_nlink = 2;
@@ -2366,11 +2361,7 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
ocfs2_init_dir_trailer(inode, new_bh, size);
}
- status = ocfs2_journal_dirty(handle, new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, new_bh);
i_size_write(inode, inode->i_sb->s_blocksize);
inode->i_nlink = 2;
@@ -2404,15 +2395,15 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
int ret;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
u16 dr_suballoc_bit;
- u64 dr_blkno;
+ u64 suballoc_loc, dr_blkno;
unsigned int num_bits;
struct buffer_head *dx_root_bh = NULL;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dir_block_trailer *trailer =
ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
- ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, &dr_suballoc_bit,
- &num_bits, &dr_blkno);
+ ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
+ &dr_suballoc_bit, &num_bits, &dr_blkno);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2440,6 +2431,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
memset(dx_root, 0, osb->sb->s_blocksize);
strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
+ dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
dx_root->dr_blkno = cpu_to_le64(dr_blkno);
@@ -2458,10 +2450,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
dx_root->dr_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
}
-
- ret = ocfs2_journal_dirty(handle, dx_root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, dx_root_bh);
ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
@@ -2475,9 +2464,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, di_bh);
*ret_dx_root_bh = dx_root_bh;
dx_root_bh = NULL;
@@ -2558,7 +2545,7 @@ static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
* chance of contiguousness as the directory grows in number
* of entries.
*/
- ret = __ocfs2_claim_clusters(osb, handle, data_ac, 1, 1, &phys, &num);
+ ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2991,7 +2978,9 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
* if we only get one now, that's enough to continue. The rest
* will be claimed after the conversion to extents.
*/
- ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len);
+ if (ocfs2_dir_resv_allowed(osb))
+ data_ac->ac_resv = &oi->ip_la_data_resv;
+ ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
if (ret) {
mlog_errno(ret);
goto out_commit;
@@ -3034,11 +3023,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
ocfs2_init_dir_trailer(dir, dirdata_bh, i);
}
- ret = ocfs2_journal_dirty(handle, dirdata_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, dirdata_bh);
if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
/*
@@ -3104,11 +3089,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
*/
dir->i_blocks = ocfs2_inode_sector_count(dir);
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, di_bh);
if (ocfs2_supports_indexed_dirs(osb)) {
ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
@@ -3138,7 +3119,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
* pass. Claim the 2nd cluster as a separate extent.
*/
if (alloc > len) {
- ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
+ ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
&len);
if (ret) {
mlog_errno(ret);
@@ -3369,6 +3350,9 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
goto bail;
}
+ if (ocfs2_dir_resv_allowed(osb))
+ data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
+
credits = ocfs2_calc_extend_credits(sb, el, 1);
} else {
spin_unlock(&OCFS2_I(dir)->ip_lock);
@@ -3423,11 +3407,7 @@ do_extend:
} else {
de->rec_len = cpu_to_le16(sb->s_blocksize);
}
- status = ocfs2_journal_dirty(handle, new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, new_bh);
dir_i_size += dir->i_sb->s_blocksize;
i_size_write(dir, dir_i_size);
@@ -3906,11 +3886,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
dx_leaf_sort_swap);
- ret = ocfs2_journal_dirty(handle, dx_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, dx_leaf_bh);
ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
&split_hash);
@@ -4490,7 +4466,10 @@ static int ocfs2_dx_dir_remove_index(struct inode *dir,
blk = le64_to_cpu(dx_root->dr_blkno);
bit = le16_to_cpu(dx_root->dr_suballoc_bit);
- bg_blkno = ocfs2_which_suballoc_group(blk, bit);
+ if (dx_root->dr_suballoc_loc)
+ bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
+ else
+ bg_blkno = ocfs2_which_suballoc_group(blk, bit);
ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
bit, bg_blkno, 1);
if (ret)
@@ -4551,8 +4530,8 @@ int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
- ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen,
- &dealloc);
+ ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
+ &dealloc, 0);
if (ret) {
mlog_errno(ret);
goto out;
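
The dr_suballoc_loc hunk above is an on-disk compatibility dance for discontiguous block groups: new metadata records which suballocator group it was allocated from, while old metadata leaves the field zero and the group must still be derived arithmetically from the block number. A hedged sketch of the fallback (geometry invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Old-style derivation: group start = block - bit offset. Only valid
 * when groups are laid out contiguously at fixed strides. */
static uint64_t which_suballoc_group(uint64_t blk, unsigned int bit)
{
	return blk - bit;
}

/* New rule: trust the recorded location when present, fall back to
 * arithmetic for metadata written before the field existed (zero). */
static uint64_t group_for(uint64_t suballoc_loc, uint64_t blk,
			  unsigned int bit)
{
	if (suballoc_loc)
		return suballoc_loc;
	return which_suballoc_group(blk, bit);
}

int main(void)
{
	printf("old inode: group %llu\n",
	       (unsigned long long)group_for(0, 10240, 16));
	printf("new inode: group %llu\n",
	       (unsigned long long)group_for(8192, 10240, 16));
	return 0;
}
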
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index a795eb9..f449991 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -88,7 +88,7 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
return 0;
}
-static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
mlog_entry_void();
@@ -145,7 +145,7 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
}
-static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
mlog_entry_void();
@@ -184,9 +184,8 @@ static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
BUG_ON(!lksb);
/* only updates if this node masters the lockres */
+ spin_lock(&res->spinlock);
if (res->owner == dlm->node_num) {
-
- spin_lock(&res->spinlock);
/* check the lksb flags for the direction */
if (lksb->flags & DLM_LKSB_GET_LVB) {
mlog(0, "getting lvb from lockres for %s node\n",
@@ -201,8 +200,8 @@ static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
* here. In the future we might want to clear it at the time
* the put is actually done.
*/
- spin_unlock(&res->spinlock);
}
+ spin_unlock(&res->spinlock);
/* reset any lvb flags on the lksb */
lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
@@ -452,7 +451,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
lock->ml.node, &status);
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+ lock->ml.node);
else {
if (status == DLM_RECOVERING) {
mlog(ML_ERROR, "sent AST to node %u, it thinks this "
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 0102be3..4b6ae2c 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -37,7 +37,7 @@
#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
#define DLM_THREAD_MS 200 // flush at least every 200 ms
-#define DLM_HASH_SIZE_DEFAULT (1 << 14)
+#define DLM_HASH_SIZE_DEFAULT (1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES 1
#else
@@ -904,6 +904,8 @@ void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock);
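
Raising DLM_HASH_SIZE_DEFAULT from 1<<14 to 1<<17 bytes grows the lockres hash table eightfold to cut chain lengths on busy clusters. The surrounding macros turn that byte budget into page and bucket counts; the same arithmetic can be replayed standalone (assuming 4K pages and an 8-byte hlist_head, which is what a 64-bit kernel sees):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HLIST_HEAD_SIZE	8UL	/* sizeof(struct hlist_head), 64-bit */

int main(void)
{
	unsigned long hash_size = 1UL << 17;	/* new default, bytes */
	unsigned long pages, buckets;

	/* Same shape as the DLM_HASH_PAGES / DLM_HASH_BUCKETS macros. */
	if (hash_size < PAGE_SIZE)
		pages = 1;
	else
		pages = hash_size / PAGE_SIZE;
	buckets = pages * (PAGE_SIZE / HLIST_HEAD_SIZE);

	printf("%lu pages, %lu buckets\n", pages, buckets);
	return 0;
}
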
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 90803b4..9f30491 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -390,7 +390,9 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
dlm_error(ret);
} else {
- mlog_errno(tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
+ res->owner);
if (dlm_is_host_down(tmpret)) {
/* instead of logging the same network error over
* and over, sleep here and wait for the heartbeat
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 988c905..6b5a492 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -511,7 +511,7 @@ static void __dlm_print_nodes(struct dlm_ctxt *dlm)
assert_spin_locked(&dlm->spinlock);
- printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name);
+ printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
@@ -534,7 +534,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
node = exit_msg->node_idx;
- printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name);
+ printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
@@ -565,7 +565,9 @@ static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
&leave_msg, sizeof(leave_msg), node,
NULL);
-
+ if (status < 0)
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_EXIT_DOMAIN_MSG, dlm->key, node);
mlog(0, "status return %d from o2net_send_message\n", status);
return status;
@@ -904,7 +906,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
set_bit(assert->node_idx, dlm->domain_map);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n",
+ printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
assert->node_idx, dlm->name);
__dlm_print_nodes(dlm);
@@ -962,7 +964,9 @@ static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
&cancel_msg, sizeof(cancel_msg), node,
NULL);
if (status < 0) {
- mlog_errno(status);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ node);
goto bail;
}
@@ -1029,10 +1033,11 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
- sizeof(join_msg), node,
- &join_resp);
+ sizeof(join_msg), node, &join_resp);
if (status < 0 && status != -ENOPROTOOPT) {
- mlog_errno(status);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
+ node);
goto bail;
}
dlm_query_join_wire_to_packet(join_resp, &packet);
@@ -1103,7 +1108,9 @@ static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
&assert_msg, sizeof(assert_msg), node,
NULL);
if (status < 0)
- mlog_errno(status);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ node);
return status;
}
@@ -1516,7 +1523,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
goto leave;
}
- dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL);
+ dlm->name = kstrdup(domain, GFP_KERNEL);
if (dlm->name == NULL) {
mlog_errno(-ENOMEM);
kfree(dlm);
@@ -1550,7 +1557,6 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
for (i = 0; i < DLM_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(dlm_master_hash(dlm, i));
- strcpy(dlm->name, domain);
dlm->key = key;
dlm->node_num = o2nm_this_node();
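
Besides the message-error reporting, the dlm_alloc_ctxt() hunk is a small hardening cleanup: kstrdup() replaces the kmalloc(strlen + 1) + strcpy() pair, so the length is computed once and the copy can never drift from the allocation. Its userspace shape, for reference (the kernel version additionally takes a gfp_t):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* What kstrdup() does, minus the gfp flags: one length computation
 * shared by the allocation and the copy. */
static char *xstrdup(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = malloc(len);

	if (p)
		memcpy(p, s, len);
	return p;
}

int main(void)
{
	char *name = xstrdup("mydomain");

	if (!name)
		return 1;
	printf("%s\n", name);
	free(name);
	return 0;
}
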
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 73333777..69cf369 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -329,7 +329,9 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
BUG();
}
} else {
- mlog_errno(tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
+ res->owner);
if (dlm_is_host_down(tmpret)) {
ret = DLM_RECOVERING;
mlog(0, "node %u died so returning DLM_RECOVERING "
@@ -429,7 +431,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
struct dlm_lock *lock;
int kernel_allocated = 0;
- lock = (struct dlm_lock *) kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
+ lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
if (!lock)
return NULL;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9289b43..4a7506a 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -617,13 +617,11 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
{
struct dlm_lock_resource *res = NULL;
- res = (struct dlm_lock_resource *)
- kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
+ res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
if (!res)
goto error;
- res->lockname.name = (char *)
- kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
+ res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
if (!res->lockname.name)
goto error;
@@ -757,8 +755,7 @@ lookup:
spin_unlock(&dlm->spinlock);
mlog(0, "allocating a new resource\n");
/* nothing found and we need to allocate one. */
- alloc_mle = (struct dlm_master_list_entry *)
- kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
+ alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!alloc_mle)
goto leave;
res = dlm_new_lockres(dlm, lockid, namelen);
@@ -1542,8 +1539,7 @@ way_up_top:
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
- mle = (struct dlm_master_list_entry *)
- kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
+ mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!mle) {
response = DLM_MASTER_RESP_ERROR;
mlog_errno(-ENOMEM);
@@ -1666,7 +1662,9 @@ again:
tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
&assert, sizeof(assert), to, &r);
if (tmpret < 0) {
- mlog(0, "assert_master returned %d!\n", tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", tmpret,
+ DLM_ASSERT_MASTER_MSG, dlm->key, to);
if (!dlm_is_host_down(tmpret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
BUG();
@@ -2205,7 +2203,9 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
&deref, sizeof(deref), res->owner, &r);
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
+ res->owner);
else if (r < 0) {
/* BAD. other node says I did not have a ref. */
mlog(ML_ERROR,"while dropping ref on %s:%.*s "
@@ -2452,8 +2452,7 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
goto leave;
}
- mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
- GFP_NOFS);
+ mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!mle) {
mlog_errno(ret);
goto leave;
@@ -2975,7 +2974,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
&migrate, sizeof(migrate), nodenum,
&status);
if (ret < 0) {
- mlog(0, "migrate_request returned %d!\n", ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
+ dlm->key, nodenum);
if (!dlm_is_host_down(ret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", ret);
BUG();
@@ -3033,8 +3034,7 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
hash = dlm_lockid_hash(name, namelen);
/* preallocate.. if this fails, abort */
- mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
- GFP_NOFS);
+ mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!mle) {
ret = -ENOMEM;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b4f99de..f8b75ce 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -803,7 +803,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
/* negative status is handled by caller */
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
+ dlm->key, request_from);
// return from here, then
// sleep until all received or error
@@ -955,10 +957,10 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
if (ret < 0) {
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
+ dlm->key, send_to);
if (!dlm_is_host_down(ret)) {
- mlog_errno(ret);
- mlog(ML_ERROR, "%s: unknown error sending data-done "
- "to %u\n", dlm->name, send_to);
BUG();
}
} else
@@ -1126,7 +1128,9 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
if (ret < 0) {
/* XXX: negative status is not handled.
* this will end up killing this node. */
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
+ dlm->key, send_to);
} else {
/* might get an -ENOMEM back here */
ret = status;
@@ -1642,7 +1646,9 @@ int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
&req, sizeof(req), nodenum, &status);
/* XXX: negative status not handled properly here. */
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
+ dlm->key, nodenum);
else {
BUG_ON(status < 0);
BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
@@ -2640,7 +2646,7 @@ retry:
if (dlm_is_host_down(ret)) {
/* node is down. not involved in recovery
* so just keep going */
- mlog(0, "%s: node %u was down when sending "
+ mlog(ML_NOTICE, "%s: node %u was down when sending "
"begin reco msg (%d)\n", dlm->name, nodenum, ret);
ret = 0;
}
@@ -2660,11 +2666,12 @@ retry:
}
if (ret < 0) {
struct dlm_lock_resource *res;
+
/* this is now a serious problem, possibly ENOMEM
* in the network stack. must retry */
mlog_errno(ret);
mlog(ML_ERROR, "begin reco of dlm %s to node %u "
- " returned %d\n", dlm->name, nodenum, ret);
+ "returned %d\n", dlm->name, nodenum, ret);
res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
DLM_RECOVERY_LOCK_NAME_LEN);
if (res) {
@@ -2789,7 +2796,9 @@ stage2:
if (ret >= 0)
ret = status;
if (ret < 0) {
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
+ dlm->key, nodenum);
if (dlm_is_host_down(ret)) {
/* this has no effect on this recovery
* session, so set the status to zero to
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 11a6d1f..d4f73ca 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -309,6 +309,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
* spinlock, and because we know that it is not migrating/
* recovering/in-progress, it is fine to reserve asts and
* basts right before queueing them all throughout */
+ assert_spin_locked(&dlm->ast_lock);
assert_spin_locked(&res->spinlock);
BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
DLM_LOCK_RES_RECOVERING|
@@ -337,7 +338,7 @@ converting:
/* queue the BAST if not already */
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
- dlm_queue_bast(dlm, lock);
+ __dlm_queue_bast(dlm, lock);
}
/* update the highest_blocked if needed */
if (lock->ml.highest_blocked < target->ml.convert_type)
@@ -355,7 +356,7 @@ converting:
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
- dlm_queue_bast(dlm, lock);
+ __dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.convert_type)
lock->ml.highest_blocked =
@@ -383,7 +384,7 @@ converting:
spin_unlock(&target->spinlock);
__dlm_lockres_reserve_ast(res);
- dlm_queue_ast(dlm, target);
+ __dlm_queue_ast(dlm, target);
/* go back and check for more */
goto converting;
}
@@ -402,7 +403,7 @@ blocked:
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
- dlm_queue_bast(dlm, lock);
+ __dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.type)
lock->ml.highest_blocked = target->ml.type;
@@ -418,7 +419,7 @@ blocked:
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
- dlm_queue_bast(dlm, lock);
+ __dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.type)
lock->ml.highest_blocked = target->ml.type;
@@ -444,7 +445,7 @@ blocked:
spin_unlock(&target->spinlock);
__dlm_lockres_reserve_ast(res);
- dlm_queue_ast(dlm, target);
+ __dlm_queue_ast(dlm, target);
/* go back and check for more */
goto converting;
}
@@ -674,6 +675,7 @@ static int dlm_thread(void *data)
/* lockres can be re-dirtied/re-added to the
* dirty_list in this gap, but that is ok */
+ spin_lock(&dlm->ast_lock);
spin_lock(&res->spinlock);
if (res->owner != dlm->node_num) {
__dlm_print_one_lock_resource(res);
@@ -694,6 +696,7 @@ static int dlm_thread(void *data)
/* move it to the tail and keep going */
res->state &= ~DLM_LOCK_RES_DIRTY;
spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->ast_lock);
mlog(0, "delaying list shuffling for in-"
"progress lockres %.*s, state=%d\n",
res->lockname.len, res->lockname.name,
@@ -715,6 +718,7 @@ static int dlm_thread(void *data)
dlm_shuffle_lists(dlm, res);
res->state &= ~DLM_LOCK_RES_DIRTY;
spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->ast_lock);
dlm_lockres_calc_usage(dlm, res);
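
The dlmthread.c changes establish a fixed lock order: dlm->ast_lock is taken before res->spinlock, and dlm_shuffle_lists() now asserts both and queues its (b)asts through the __dlm_queue_* variants, which expect ast_lock to already be held. A toy pthread model of the "take the outer lock, call the __locked helper" discipline (all names invented; this models the pattern, not the DLM):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ast_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
static int ast_locked, queued;

/* Double-underscore convention: caller already holds ast_lock. */
static void __queue_ast(void)
{
	assert(ast_locked);	/* models assert_spin_locked() */
	queued++;
}

/* Locking variant for callers that hold nothing. */
static void queue_ast(void)
{
	pthread_mutex_lock(&ast_lock); ast_locked = 1;
	__queue_ast();
	ast_locked = 0; pthread_mutex_unlock(&ast_lock);
}

/* Models dlm_shuffle_lists(): runs with both locks held, taken in
 * the fixed order ast_lock -> res_lock, and uses only __ variants. */
static void shuffle(void)
{
	pthread_mutex_lock(&ast_lock); ast_locked = 1;
	pthread_mutex_lock(&res_lock);

	__queue_ast();	/* calling queue_ast() here would deadlock */

	pthread_mutex_unlock(&res_lock);
	ast_locked = 0; pthread_mutex_unlock(&ast_lock);
}

int main(void)
{
	queue_ast();
	shuffle();
	printf("queued %d asts\n", queued);
	return 0;
}
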
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index b47c1b9..817287c 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -354,7 +354,8 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
mlog(0, "master was in-progress. retry\n");
ret = status;
} else {
- mlog_errno(tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner);
if (dlm_is_host_down(tmpret)) {
/* NOTE: this seems strange, but it is what we want.
* when the master goes down during a cancel or
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 1b0de15..b83d610 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -112,20 +112,20 @@ MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
* O_RDONLY -> PRMODE level
* O_WRONLY -> EXMODE level
*
- * O_NONBLOCK -> LKM_NOQUEUE
+ * O_NONBLOCK -> NOQUEUE
*/
static int dlmfs_decode_open_flags(int open_flags,
int *level,
int *flags)
{
if (open_flags & (O_WRONLY|O_RDWR))
- *level = LKM_EXMODE;
+ *level = DLM_LOCK_EX;
else
- *level = LKM_PRMODE;
+ *level = DLM_LOCK_PR;
*flags = 0;
if (open_flags & O_NONBLOCK)
- *flags |= LKM_NOQUEUE;
+ *flags |= DLM_LKF_NOQUEUE;
return 0;
}
@@ -166,7 +166,7 @@ static int dlmfs_file_open(struct inode *inode,
* to allow userspace to distinguish a
* valid lock request from one that simply couldn't be
* granted. */
- if (flags & LKM_NOQUEUE && status == -EAGAIN)
+ if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
status = -ETXTBSY;
kfree(fp);
goto bail;
@@ -193,7 +193,7 @@ static int dlmfs_file_release(struct inode *inode,
status = 0;
if (fp) {
level = fp->fp_lock_level;
- if (level != LKM_IVMODE)
+ if (level != DLM_LOCK_IV)
user_dlm_cluster_unlock(&ip->ip_lockres, level);
kfree(fp);
@@ -262,7 +262,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
if ((count + *ppos) > i_size_read(inode))
readlen = i_size_read(inode) - *ppos;
else
- readlen = count - *ppos;
+ readlen = count;
lvb_buf = kmalloc(readlen, GFP_NOFS);
if (!lvb_buf)
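
Two separate fixes land in dlmfs.c above. The open-flag decoding moves from the o2dlm LKM_* constants to the generic fs/dlm ones (DLM_LOCK_EX/PR/IV, DLM_LKF_NOQUEUE), and dlmfs_file_read() stops subtracting the file position twice: when the request fits inside the file, the read length is simply count. A tiny model of the corrected clamping (hypothetical helper, userspace types):

#include <stdio.h>
#include <stdint.h>

/* Corrected dlmfs-style clamp: the position only enters the
 * calculation when the request would run past end-of-file. The old
 * code computed "count - pos" in the common case, shrinking reads. */
static size_t clamp_readlen(size_t count, uint64_t pos, uint64_t i_size)
{
	if (count + pos > i_size)
		return i_size - pos;
	return count;
}

int main(void)
{
	/* 64-byte LVB file, read 32 bytes at offset 16: fits, so 32. */
	printf("%zu\n", clamp_readlen(32, 16, 64));
	/* read 64 bytes at offset 16: clipped to the 48 that exist. */
	printf("%zu\n", clamp_readlen(64, 16, 64));
	return 0;
}
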
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 50c4ee8..39eb16a 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3897,7 +3897,8 @@ static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
oinfo->dqi_gi.dqi_free_entry =
be32_to_cpu(lvb->lvb_free_entry);
} else {
- status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh);
+ status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
+ oinfo->dqi_giblk, &bh);
if (status) {
mlog_errno(status);
goto bail;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 17947dc..97e54b9 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -278,10 +278,7 @@ int ocfs2_update_inode_atime(struct inode *inode,
inode->i_atime = CURRENT_TIME;
di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
-
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -430,9 +427,7 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, fe_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
@@ -449,7 +444,6 @@ static int ocfs2_truncate_file(struct inode *inode,
int status = 0;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct ocfs2_truncate_context *tc = NULL;
mlog_entry("(inode = %llu, new_i_size = %llu\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -488,6 +482,9 @@ static int ocfs2_truncate_file(struct inode *inode,
down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ ocfs2_resv_discard(&osb->osb_la_resmap,
+ &OCFS2_I(inode)->ip_la_data_resv);
+
/*
* The inode lock forced other nodes to sync and drop their
* pages, which (correctly) happens even if we have a truncate
@@ -517,13 +514,7 @@ static int ocfs2_truncate_file(struct inode *inode,
goto bail_unlock_sem;
}
- status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
- if (status < 0) {
- mlog_errno(status);
- goto bail_unlock_sem;
- }
-
- status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
+ status = ocfs2_commit_truncate(osb, inode, di_bh);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_sem;
@@ -666,11 +657,7 @@ restarted_transaction:
goto leave;
}
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, bh);
spin_lock(&OCFS2_I(inode)->ip_lock);
clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
@@ -684,6 +671,7 @@ restarted_transaction:
if (why == RESTART_META) {
mlog(0, "restarting function.\n");
restart_func = 1;
+ status = 0;
} else {
BUG_ON(why != RESTART_TRANS);
@@ -945,9 +933,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
struct ocfs2_super *osb = OCFS2_SB(sb);
struct buffer_head *bh = NULL;
handle_t *handle = NULL;
- int qtype;
- struct dquot *transfer_from[MAXQUOTAS] = { };
struct dquot *transfer_to[MAXQUOTAS] = { };
+ int qtype;
mlog_entry("(0x%p, '%.*s')\n", dentry,
dentry->d_name.len, dentry->d_name.name);
@@ -978,10 +965,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (status)
return status;
+ if (is_quota_modification(inode, attr))
+ dquot_initialize(inode);
size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
if (size_change) {
- dquot_initialize(inode);
-
status = ocfs2_rw_lock(inode, 1);
if (status < 0) {
mlog_errno(status);
@@ -1031,9 +1018,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
USRQUOTA);
- transfer_from[USRQUOTA] = dqget(sb, inode->i_uid,
- USRQUOTA);
- if (!transfer_to[USRQUOTA] || !transfer_from[USRQUOTA]) {
+ if (!transfer_to[USRQUOTA]) {
status = -ESRCH;
goto bail_unlock;
}
@@ -1043,9 +1028,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
GRPQUOTA);
- transfer_from[GRPQUOTA] = dqget(sb, inode->i_gid,
- GRPQUOTA);
- if (!transfer_to[GRPQUOTA] || !transfer_from[GRPQUOTA]) {
+ if (!transfer_to[GRPQUOTA]) {
status = -ESRCH;
goto bail_unlock;
}
@@ -1057,7 +1040,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
mlog_errno(status);
goto bail_unlock;
}
- status = dquot_transfer(inode, attr);
+ status = __dquot_transfer(inode, transfer_to);
if (status < 0)
goto bail_commit;
} else {
@@ -1097,10 +1080,8 @@ bail:
brelse(bh);
/* Release quota pointers in case we acquired them */
- for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
+ for (qtype = 0; qtype < MAXQUOTAS; qtype++)
dqput(transfer_to[qtype]);
- dqput(transfer_from[qtype]);
- }
if (!status && attr->ia_valid & ATTR_MODE) {
status = ocfs2_acl_chmod(inode);
@@ -1194,9 +1175,7 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
di = (struct ocfs2_dinode *) bh->b_data;
di->i_mode = cpu_to_le16(inode->i_mode);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, bh);
out_trans:
ocfs2_commit_trans(osb, handle);
@@ -1433,16 +1412,90 @@ out:
return ret;
}
+static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
+{
+ int i;
+ struct ocfs2_extent_rec *rec = NULL;
+
+ for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
+
+ rec = &el->l_recs[i];
+
+ if (le32_to_cpu(rec->e_cpos) < pos)
+ break;
+ }
+
+ return i;
+}
+
+/*
+ * Helper to calculate the punching position and length in one pass; we handle the
+ * following three cases in order:
+ *
+ * - remove the entire record
+ * - remove a partial record
+ * - no record needs to be removed (hole-punching completed)
+ */
+static void ocfs2_calc_trunc_pos(struct inode *inode,
+ struct ocfs2_extent_list *el,
+ struct ocfs2_extent_rec *rec,
+ u32 trunc_start, u32 *trunc_cpos,
+ u32 *trunc_len, u32 *trunc_end,
+ u64 *blkno, int *done)
+{
+ int ret = 0;
+ u32 coff, range;
+
+ range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
+
+ if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
+ *trunc_cpos = le32_to_cpu(rec->e_cpos);
+ /*
+ * Skip holes if any.
+ */
+ if (range < *trunc_end)
+ *trunc_end = range;
+ *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
+ *blkno = le64_to_cpu(rec->e_blkno);
+ *trunc_end = le32_to_cpu(rec->e_cpos);
+ } else if (range > trunc_start) {
+ *trunc_cpos = trunc_start;
+ *trunc_len = *trunc_end - trunc_start;
+ coff = trunc_start - le32_to_cpu(rec->e_cpos);
+ *blkno = le64_to_cpu(rec->e_blkno) +
+ ocfs2_clusters_to_blocks(inode->i_sb, coff);
+ *trunc_end = trunc_start;
+ } else {
+ /*
+ * There are two possibilities here:
+ *
+ * - last record has been removed
+ * - trunc_start was within a hole
+ *
+ * Both cases mean that hole punching is complete.
+ */
+ ret = 1;
+ }
+
+ *done = ret;
+}
+
static int ocfs2_remove_inode_range(struct inode *inode,
struct buffer_head *di_bh, u64 byte_start,
u64 byte_len)
{
- int ret = 0;
- u32 trunc_start, trunc_len, cpos, phys_cpos, alloc_size;
+ int ret = 0, flags = 0, done = 0, i;
+ u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
+ u32 cluster_in_el;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_cached_dealloc_ctxt dealloc;
struct address_space *mapping = inode->i_mapping;
struct ocfs2_extent_tree et;
+ struct ocfs2_path *path = NULL;
+ struct ocfs2_extent_list *el = NULL;
+ struct ocfs2_extent_rec *rec = NULL;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -1468,17 +1521,35 @@ static int ocfs2_remove_inode_range(struct inode *inode,
goto out;
}
+ /*
+ * For reflinks, we may need to CoW two clusters which might be
+ * partially zeroed later, if the hole's start and end offsets fall
+ * within one cluster (i.e., are not exactly aligned to the cluster size).
+ */
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
+
+ ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
- trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
- if (trunc_len >= trunc_start)
- trunc_len -= trunc_start;
- else
- trunc_len = 0;
+ trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
+ cluster_in_el = trunc_end;
- mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u\n",
+ mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)byte_start,
- (unsigned long long)byte_len, trunc_start, trunc_len);
+ (unsigned long long)byte_len, trunc_start, trunc_end);
ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
if (ret) {
@@ -1486,31 +1557,79 @@ static int ocfs2_remove_inode_range(struct inode *inode,
goto out;
}
- cpos = trunc_start;
- while (trunc_len) {
- ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
- &alloc_size, NULL);
+ path = ocfs2_new_path_from_et(&et);
+ if (!path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ while (trunc_end > trunc_start) {
+
+ ret = ocfs2_find_path(INODE_CACHE(inode), path,
+ cluster_in_el);
if (ret) {
mlog_errno(ret);
goto out;
}
- if (alloc_size > trunc_len)
- alloc_size = trunc_len;
+ el = path_leaf_el(path);
+
+ i = ocfs2_find_rec(el, trunc_end);
+ /*
+ * Need to go to previous extent block.
+ */
+ if (i < 0) {
+ if (path->p_tree_depth == 0)
+ break;
- /* Only do work for non-holes */
- if (phys_cpos != 0) {
- ret = ocfs2_remove_btree_range(inode, &et, cpos,
- phys_cpos, alloc_size,
- &dealloc);
+ ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
+ path,
+ &cluster_in_el);
if (ret) {
mlog_errno(ret);
goto out;
}
+
+ /*
+ * We've reached the leftmost extent block,
+ * it's safe to leave.
+ */
+ if (cluster_in_el == 0)
+ break;
+
+ /*
+ * The 'pos' searched for previous extent block is
+ * always one cluster less than actual trunc_end.
+ */
+ trunc_end = cluster_in_el + 1;
+
+ ocfs2_reinit_path(path, 1);
+
+ continue;
+
+ } else
+ rec = &el->l_recs[i];
+
+ ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
+ &trunc_len, &trunc_end, &blkno, &done);
+ if (done)
+ break;
+
+ flags = rec->e_flags;
+ phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
+
+ ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
+ phys_cpos, trunc_len, flags,
+ &dealloc, refcount_loc);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
}
- cpos += alloc_size;
- trunc_len -= alloc_size;
+ cluster_in_el = trunc_end;
+
+ ocfs2_reinit_path(path, 1);
}
ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
@@ -1981,18 +2100,18 @@ relock:
/* communicate with ocfs2_dio_end_io */
ocfs2_iocb_set_rw_locked(iocb, rw_level);
- if (direct_io) {
- ret = generic_segment_checks(iov, &nr_segs, &ocount,
- VERIFY_READ);
- if (ret)
- goto out_dio;
+ ret = generic_segment_checks(iov, &nr_segs, &ocount,
+ VERIFY_READ);
+ if (ret)
+ goto out_dio;
- count = ocount;
- ret = generic_write_checks(file, ppos, &count,
- S_ISBLK(inode->i_mode));
- if (ret)
- goto out_dio;
+ count = ocount;
+ ret = generic_write_checks(file, ppos, &count,
+ S_ISBLK(inode->i_mode));
+ if (ret)
+ goto out_dio;
+ if (direct_io) {
written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
ppos, count, ocount);
if (written < 0) {
@@ -2007,7 +2126,10 @@ relock:
goto out_dio;
}
} else {
- written = __generic_file_aio_write(iocb, iov, nr_segs, ppos);
+ current->backing_dev_info = file->f_mapping->backing_dev_info;
+ written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
+ ppos, count, 0);
+ current->backing_dev_info = NULL;
}
out_dio:
@@ -2021,9 +2143,9 @@ out_dio:
if (ret < 0)
written = ret;
- if (!ret && (old_size != i_size_read(inode) ||
- old_clusters != OCFS2_I(inode)->ip_clusters ||
- has_refcount)) {
+ if (!ret && ((old_size != i_size_read(inode)) ||
+ (old_clusters != OCFS2_I(inode)->ip_clusters) ||
+ has_refcount)) {
ret = jbd2_journal_force_commit(osb->journal->j_journal);
if (ret < 0)
written = ret;
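
The rewritten ocfs2_remove_inode_range() walks the extent tree from trunc_end down toward trunc_start, and ocfs2_calc_trunc_pos() classifies each record against [trunc_start, trunc_end): whole-record removal, partial (tail) removal, or done. A compilable userspace model of that case analysis; the kernel re-finds the leaf via ocfs2_find_path() each pass and also tracks the physical block, so the array walk and plain integers below are simplifications:

#include <stdio.h>
#include <stdint.h>

struct rec { uint32_t cpos, clusters; };  /* models ocfs2_extent_rec */

/* Returns 1 when punching is complete, else fills cpos/len to remove
 * and pulls trunc_end left, mirroring ocfs2_calc_trunc_pos(). */
static int calc_trunc_pos(const struct rec *r, uint32_t trunc_start,
			  uint32_t *trunc_end, uint32_t *cpos, uint32_t *len)
{
	uint32_t range = r->cpos + r->clusters;

	if (r->cpos >= trunc_start) {		/* remove whole record */
		*cpos = r->cpos;
		if (range < *trunc_end)		/* skip a hole above it */
			*trunc_end = range;
		*len = *trunc_end - r->cpos;
		*trunc_end = r->cpos;
		return 0;
	}
	if (range > trunc_start) {		/* remove record's tail */
		*cpos = trunc_start;
		*len = *trunc_end - trunc_start;
		*trunc_end = trunc_start;
		return 0;
	}
	return 1;				/* record below range: done */
}

int main(void)
{
	struct rec recs[] = { { 0, 4 }, { 4, 4 } };	/* contiguous */
	uint32_t start = 2, end = 6, cpos, len;
	int i;

	for (i = 1; i >= 0 && end > start; i--) {
		if (calc_trunc_pos(&recs[i], start, &end, &cpos, &len))
			break;
		printf("remove %u clusters at cpos %u\n", len, cpos);
	}
	return 0;
}
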
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 07cc8bb..abb0a95 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -376,6 +376,10 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
OCFS2_I(inode)->ip_last_used_slot = 0;
OCFS2_I(inode)->ip_last_used_group = 0;
+
+ if (S_ISDIR(inode->i_mode))
+ ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
+ OCFS2_RESV_FLAG_DIR);
mlog_exit_void();
}
@@ -539,7 +543,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
struct buffer_head *fe_bh)
{
int status = 0;
- struct ocfs2_truncate_context *tc = NULL;
struct ocfs2_dinode *fe;
handle_t *handle = NULL;
@@ -558,6 +561,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
+ handle = NULL;
mlog_errno(status);
goto out;
}
@@ -581,13 +585,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
ocfs2_commit_trans(osb, handle);
handle = NULL;
- status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
- if (status < 0) {
- mlog_errno(status);
- goto out;
- }
-
- status = ocfs2_commit_truncate(osb, inode, fe_bh, tc);
+ status = ocfs2_commit_truncate(osb, inode, fe_bh);
if (status < 0) {
mlog_errno(status);
goto out;
@@ -639,11 +637,13 @@ static int ocfs2_remove_inode(struct inode *inode,
goto bail_unlock;
}
- status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
- orphan_dir_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail_commit;
+ if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
+ status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
+ orphan_dir_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
}
/* set the inodes dtime */
@@ -656,12 +656,7 @@ static int ocfs2_remove_inode(struct inode *inode,
di->i_dtime = cpu_to_le64(CURRENT_TIME.tv_sec);
di->i_flags &= cpu_to_le32(~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL));
-
- status = ocfs2_journal_dirty(handle, di_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail_commit;
- }
+ ocfs2_journal_dirty(handle, di_bh);
ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh);
dquot_free_inode(inode);
@@ -722,38 +717,39 @@ static void ocfs2_signal_wipe_completion(struct ocfs2_super *osb,
static int ocfs2_wipe_inode(struct inode *inode,
struct buffer_head *di_bh)
{
- int status, orphaned_slot;
+ int status, orphaned_slot = -1;
struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct ocfs2_dinode *di;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
- di = (struct ocfs2_dinode *) di_bh->b_data;
- orphaned_slot = le16_to_cpu(di->i_orphaned_slot);
+ if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
+ orphaned_slot = le16_to_cpu(di->i_orphaned_slot);
- status = ocfs2_check_orphan_recovery_state(osb, orphaned_slot);
- if (status)
- return status;
+ status = ocfs2_check_orphan_recovery_state(osb, orphaned_slot);
+ if (status)
+ return status;
- orphan_dir_inode = ocfs2_get_system_file_inode(osb,
- ORPHAN_DIR_SYSTEM_INODE,
- orphaned_slot);
- if (!orphan_dir_inode) {
- status = -EEXIST;
- mlog_errno(status);
- goto bail;
- }
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ orphaned_slot);
+ if (!orphan_dir_inode) {
+ status = -EEXIST;
+ mlog_errno(status);
+ goto bail;
+ }
- /* Lock the orphan dir. The lock will be held for the entire
- * delete_inode operation. We do this now to avoid races with
- * recovery completion on other nodes. */
- mutex_lock(&orphan_dir_inode->i_mutex);
- status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
- if (status < 0) {
- mutex_unlock(&orphan_dir_inode->i_mutex);
+ /* Lock the orphan dir. The lock will be held for the entire
+ * delete_inode operation. We do this now to avoid races with
+ * recovery completion on other nodes. */
+ mutex_lock(&orphan_dir_inode->i_mutex);
+ status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+ if (status < 0) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
- mlog_errno(status);
- goto bail;
+ mlog_errno(status);
+ goto bail;
+ }
}
/* we do this while holding the orphan dir lock because we
@@ -794,6 +790,9 @@ static int ocfs2_wipe_inode(struct inode *inode,
mlog_errno(status);
bail_unlock_dir:
+ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)
+ return status;
+
ocfs2_inode_unlock(orphan_dir_inode, 1);
mutex_unlock(&orphan_dir_inode->i_mutex);
brelse(orphan_dir_bh);
@@ -889,7 +888,8 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
/* Do some basic inode verification... */
di = (struct ocfs2_dinode *) di_bh->b_data;
- if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) {
+ if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL)) &&
+ !(oi->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
/*
* Inodes in the orphan dir must have ORPHANED_FL. The only
* inodes that come back out of the orphan dir are reflink
@@ -972,7 +972,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
void ocfs2_delete_inode(struct inode *inode)
{
int wipe, status;
- sigset_t blocked, oldset;
+ sigset_t oldset;
struct buffer_head *di_bh = NULL;
mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
@@ -999,13 +999,7 @@ void ocfs2_delete_inode(struct inode *inode)
* messaging paths may return us -ERESTARTSYS. Which would
* cause us to exit early, resulting in inodes being orphaned
* forever. */
- sigfillset(&blocked);
- status = sigprocmask(SIG_BLOCK, &blocked, &oldset);
- if (status < 0) {
- mlog_errno(status);
- ocfs2_cleanup_delete_inode(inode, 1);
- goto bail;
- }
+ ocfs2_block_signals(&oldset);
/*
* Synchronize us against ocfs2_get_dentry. We take this in
@@ -1079,9 +1073,7 @@ bail_unlock_nfs_sync:
ocfs2_nfs_sync_unlock(OCFS2_SB(inode->i_sb), 0);
bail_unblock:
- status = sigprocmask(SIG_SETMASK, &oldset, NULL);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_unblock_signals(&oldset);
bail:
clear_inode(inode);
mlog_exit_void();
@@ -1115,6 +1107,10 @@ void ocfs2_clear_inode(struct inode *inode)
ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres);
ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);
+ ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
+ &oi->ip_la_data_resv);
+ ocfs2_resv_init_once(&oi->ip_la_data_resv);
+
/* We very well may get a clear_inode before all an inodes
* metadata has hit disk. Of course, we can't drop any cluster
* locks until the journal has finished with it. The only
@@ -1290,13 +1286,8 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0)
- mlog_errno(status);
-
- status = 0;
+ ocfs2_journal_dirty(handle, bh);
leave:
-
mlog_exit(status);
return status;
}
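
The delete-inode path now blocks signals through a pair of helpers instead of open-coding sigfillset()/sigprocmask() and checking a return value that in practice never fails there. The same pairing is plain POSIX and can be demonstrated directly (a userspace sketch of what ocfs2_block_signals()/ocfs2_unblock_signals() presumably wrap):

#include <signal.h>
#include <stdio.h>

/* Block every catchable signal, saving the old mask for restore. */
static void block_signals(sigset_t *oldset)
{
	sigset_t blocked;

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, oldset);
}

static void unblock_signals(const sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

int main(void)
{
	sigset_t oldset;

	block_signals(&oldset);
	/* ... critical section that must not be interrupted, e.g.
	 * cluster messaging that would return -ERESTARTSYS ... */
	puts("signals blocked during critical section");
	unblock_signals(&oldset);
	return 0;
}
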
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index ba4fe07..9f5f5fc 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -70,6 +70,8 @@ struct ocfs2_inode_info
/* Only valid if the inode is the dir. */
u32 ip_last_used_slot;
u64 ip_last_used_group;
+
+ struct ocfs2_alloc_reservation ip_la_data_resv;
};
/*
@@ -100,6 +102,8 @@ struct ocfs2_inode_info
#define OCFS2_INODE_MAYBE_ORPHANED 0x00000020
/* Does someone have the file open O_DIRECT */
#define OCFS2_INODE_OPEN_DIRECT 0x00000040
+/* Tell the inode wipe code it's not in orphan dir */
+#define OCFS2_INODE_SKIP_ORPHAN_DIR 0x00000080
static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
{
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 9336c60..47878cf 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -402,9 +402,7 @@ int ocfs2_commit_trans(struct ocfs2_super *osb,
}
/*
- * 'nblocks' is what you want to add to the current
- * transaction. extend_trans will either extend the current handle by
- * nblocks, or commit it and start a new one with nblocks credits.
+ * 'nblocks' is what you want to add to the current transaction.
*
* This might call jbd2_journal_restart() which will commit dirty buffers
* and then restart the transaction. Before calling
@@ -422,11 +420,15 @@ int ocfs2_commit_trans(struct ocfs2_super *osb,
*/
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
- int status;
+ int status, old_nblocks;
BUG_ON(!handle);
- BUG_ON(!nblocks);
+ BUG_ON(nblocks < 0);
+
+ if (!nblocks)
+ return 0;
+ old_nblocks = handle->h_buffer_credits;
mlog_entry_void();
mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
@@ -445,7 +447,8 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
mlog(0,
"jbd2_journal_extend failed, trying "
"jbd2_journal_restart\n");
- status = jbd2_journal_restart(handle, nblocks);
+ status = jbd2_journal_restart(handle,
+ old_nblocks + nblocks);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -734,8 +737,7 @@ int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
return __ocfs2_journal_access(handle, ci, bh, NULL, type);
}
-int ocfs2_journal_dirty(handle_t *handle,
- struct buffer_head *bh)
+void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
int status;
@@ -743,13 +745,9 @@ int ocfs2_journal_dirty(handle_t *handle,
(unsigned long long)bh->b_blocknr);
status = jbd2_journal_dirty_metadata(handle, bh);
- if (status < 0)
- mlog(ML_ERROR, "Could not dirty metadata buffer. "
- "(bh->b_blocknr=%llu)\n",
- (unsigned long long)bh->b_blocknr);
+ BUG_ON(status);
- mlog_exit(status);
- return status;
+ mlog_exit_void();
}
#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
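
Two journal changes above are worth restating. ocfs2_journal_dirty() can no longer fail, so callers shed their error branches (hence the many deletions throughout this diff), and ocfs2_extend_trans() now restarts with old + new credits: jbd2_journal_restart() sets the handle's credit count rather than adding to it, so passing only nblocks would silently shrink the transaction. A small model of that accounting (invented types, same arithmetic):

#include <stdio.h>

struct handle { int h_buffer_credits; };

/* Pretend the journal can always grant a restart but can only
 * sometimes extend in place (0 = extended, nonzero = must restart). */
static int journal_extend(struct handle *h, int nblocks, int can_extend)
{
	if (!can_extend)
		return 1;
	h->h_buffer_credits += nblocks;
	return 0;
}

/* Restart REPLACES the credit count -- this is the subtlety. */
static void journal_restart(struct handle *h, int nblocks)
{
	h->h_buffer_credits = nblocks;
}

static void extend_trans(struct handle *h, int nblocks, int can_extend)
{
	int old_nblocks = h->h_buffer_credits;

	if (!nblocks)
		return;
	if (journal_extend(h, nblocks, can_extend))
		/* old + new, so the restart keeps what we had */
		journal_restart(h, old_nblocks + nblocks);
}

int main(void)
{
	struct handle h = { .h_buffer_credits = 10 };

	extend_trans(&h, 5, 0);	/* forced onto the restart path */
	printf("credits after restart: %d\n", h.h_buffer_credits);
	return 0;
}
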
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 3f74e09..b5baaa8 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -325,8 +325,7 @@ int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
* <modify the bh>
* ocfs2_journal_dirty(handle, bh);
*/
-int ocfs2_journal_dirty(handle_t *handle,
- struct buffer_head *bh);
+void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh);
/*
* Credit Macros:
@@ -562,6 +561,18 @@ static inline int ocfs2_calc_group_alloc_credits(struct super_block *sb,
return blocks;
}
+/*
+ * Allocating a discontiguous block group requires the credits from
+ * ocfs2_calc_group_alloc_credits() as well as enough credits to fill
+ * the group descriptor's extent list. The caller already has started
+ * the transaction with ocfs2_calc_group_alloc_credits(). They extend
+ * it with these credits.
+ */
+static inline int ocfs2_calc_bg_discontig_credits(struct super_block *sb)
+{
+ return ocfs2_extent_recs_per_gd(sb);
+}
+
static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb,
unsigned int clusters_to_del,
struct ocfs2_dinode *fe,
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index c983715..3d74196 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -52,7 +52,8 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc,
- u32 numbits);
+ u32 *numbits,
+ struct ocfs2_alloc_reservation *resv);
static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
@@ -74,6 +75,144 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
struct inode *local_alloc_inode);
+/*
+ * ocfs2_la_default_mb() - determine a default size, in megabytes, of
+ * the local alloc.
+ *
+ * Generally, we'd like to pick as large a local alloc as
+ * possible. Performance on large workloads tends to scale
+ * proportionally to la size. In addition to that, the reservations
+ * code functions more efficiently as it can reserve more windows for
+ * write.
+ *
+ * Some things work against us when trying to choose a large local alloc:
+ *
+ * - We need to ensure our sizing is picked to leave enough space in
+ * group descriptors for other allocations (such as block groups,
+ * etc). Picking default sizes which are a multiple of 4 could help
+ * - block groups are allocated in 2MB and 4MB chunks.
+ *
+ * - Likewise, we don't want to starve other nodes of bits on small
+ * file systems. This can easily be taken care of by limiting our
+ * default to a reasonable size (256M) on larger cluster sizes.
+ *
+ * - Some file systems can't support very large sizes - 4k and 8k in
+ * particular are limited to less than 128 and 256 megabytes respectively.
+ *
+ * The following reference table shows group descriptor and local
+ * alloc maximums at various cluster sizes (4k blocksize)
+ *
+ * csize: 4K group: 126M la: 121M
+ * csize: 8K group: 252M la: 243M
+ * csize: 16K group: 504M la: 486M
+ * csize: 32K group: 1008M la: 972M
+ * csize: 64K group: 2016M la: 1944M
+ * csize: 128K group: 4032M la: 3888M
+ * csize: 256K group: 8064M la: 7776M
+ * csize: 512K group: 16128M la: 15552M
+ * csize: 1024K group: 32256M la: 31104M
+ */
+#define OCFS2_LA_MAX_DEFAULT_MB 256
+#define OCFS2_LA_OLD_DEFAULT 8
+unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
+{
+ unsigned int la_mb;
+ unsigned int gd_mb;
+ unsigned int megs_per_slot;
+ struct super_block *sb = osb->sb;
+
+ gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
+ 8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat));
+
+ /*
+ * This takes care of file systems with very small group
+ * descriptors - 512 byte blocksize at cluster sizes lower
+ * than 16K and also 1k blocksize with 4k cluster size.
+ */
+ if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
+ || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
+ return OCFS2_LA_OLD_DEFAULT;
+
+ /*
+ * Leave enough room for some block groups and make the final
+ * value we work from a multiple of 4.
+ */
+ gd_mb -= 16;
+ gd_mb &= 0xFFFFFFFC; /* round down to a multiple of 4 */
+
+ la_mb = gd_mb;
+
+ /*
+ * Keep window sizes down to a reasonable default
+ */
+ if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
+ /*
+ * Some clustersize / blocksize combinations will have
+ * given us a larger than OCFS2_LA_MAX_DEFAULT_MB
+ * default size, but get poor distribution when
+ * limited to exactly 256 megabytes.
+ *
+ * As an example, 16K clustersize at 4K blocksize
+ * gives us a cluster group size of 504M. Paring the
+ * local alloc size down to 256, however, would give us
+ * only one window and around 200MB left in the
+ * cluster group. Instead, find the first size below
+ * 256 which would give us an even distribution.
+ *
+ * Larger cluster group sizes actually work out pretty
+ * well when pared to 256, so we don't have to do this
+ * for any group that fits more than two
+ * OCFS2_LA_MAX_DEFAULT_MB windows.
+ */
+ if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
+ la_mb = 256;
+ else {
+ unsigned int gd_mult = gd_mb;
+
+ while (gd_mult > 256)
+ gd_mult = gd_mult >> 1;
+
+ la_mb = gd_mult;
+ }
+ }
+
+ megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
+ megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
+ /* Too many nodes, too few disk clusters. */
+ if (megs_per_slot < la_mb)
+ la_mb = megs_per_slot;
+
+ return la_mb;
+}
+
+void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
+{
+ struct super_block *sb = osb->sb;
+ unsigned int la_default_mb = ocfs2_la_default_mb(osb);
+ unsigned int la_max_mb;
+
+ la_max_mb = ocfs2_clusters_to_megabytes(sb,
+ ocfs2_local_alloc_size(sb) * 8);
+
+ mlog(0, "requested: %dM, max: %uM, default: %uM\n",
+ requested_mb, la_max_mb, la_default_mb);
+
+ if (requested_mb == -1) {
+ /* No user request - use defaults */
+ osb->local_alloc_default_bits =
+ ocfs2_megabytes_to_clusters(sb, la_default_mb);
+ } else if (requested_mb > la_max_mb) {
+ /* Request is too big, we give the maximum available */
+ osb->local_alloc_default_bits =
+ ocfs2_megabytes_to_clusters(sb, la_max_mb);
+ } else {
+ osb->local_alloc_default_bits =
+ ocfs2_megabytes_to_clusters(sb, requested_mb);
+ }
+
+ osb->local_alloc_bits = osb->local_alloc_default_bits;
+}
+
static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
{
return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
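
The sizing policy in ocfs2_la_default_mb() boils down to arithmetic: start from the group-descriptor maximum, trim 16MB of headroom, round down to a multiple of 4, then cap at 256MB, except that mid-sized groups are instead halved until they fit so the group divides into windows evenly. That arithmetic can be replayed standalone (group sizes taken from the reference table in the comment above; this is a model of the policy, not the kernel function, and the per-slot capping is omitted):

#include <stdio.h>

#define LA_MAX_DEFAULT_MB 256

/* Replays the ocfs2_la_default_mb() arithmetic for a given group
 * descriptor size in megabytes. */
static unsigned int la_default_mb(unsigned int gd_mb)
{
	unsigned int la_mb;

	gd_mb -= 16;		/* headroom for block groups etc. */
	gd_mb &= ~3U;		/* multiple of 4 */
	la_mb = gd_mb;

	if (la_mb > LA_MAX_DEFAULT_MB) {
		if (gd_mb > 2 * LA_MAX_DEFAULT_MB) {
			la_mb = LA_MAX_DEFAULT_MB;
		} else {
			/* e.g. a 504M group: halve down to 244M rather
			 * than leave ~200M stranded next to a 256M window */
			while (gd_mb > LA_MAX_DEFAULT_MB)
				gd_mb >>= 1;
			la_mb = gd_mb;
		}
	}
	return la_mb;
}

int main(void)
{
	/* group sizes from the reference table: 4K..32K clusters */
	unsigned int groups[] = { 126, 252, 504, 1008 };
	unsigned int i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
		printf("group %4uM -> la %3uM\n",
		       groups[i], la_default_mb(groups[i]));
	return 0;
}
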
@@ -156,7 +295,7 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
osb->local_alloc_bits, (osb->bitmap_cpg - 1));
osb->local_alloc_bits =
ocfs2_megabytes_to_clusters(osb->sb,
- OCFS2_DEFAULT_LOCAL_ALLOC_SIZE);
+ ocfs2_la_default_mb(osb));
}
/* read the alloc off disk */
@@ -262,6 +401,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
osb->local_alloc_state = OCFS2_LA_DISABLED;
+ ocfs2_resmap_uninit(&osb->osb_la_resmap);
+
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
@@ -305,12 +446,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
}
ocfs2_clear_local_alloc(alloc);
-
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, bh);
brelse(bh);
osb->local_alloc_bh = NULL;
@@ -481,46 +617,6 @@ out:
return status;
}
-/* Check to see if the local alloc window is within ac->ac_max_block */
-static int ocfs2_local_alloc_in_range(struct inode *inode,
- struct ocfs2_alloc_context *ac,
- u32 bits_wanted)
-{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct ocfs2_dinode *alloc;
- struct ocfs2_local_alloc *la;
- int start;
- u64 block_off;
-
- if (!ac->ac_max_block)
- return 1;
-
- alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
- la = OCFS2_LOCAL_ALLOC(alloc);
-
- start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted);
- if (start == -1) {
- mlog_errno(-ENOSPC);
- return 0;
- }
-
- /*
- * Converting (bm_off + start + bits_wanted) to blocks gives us
- * the blkno just past our actual allocation. This is perfect
- * to compare with ac_max_block.
- */
- block_off = ocfs2_clusters_to_blocks(inode->i_sb,
- le32_to_cpu(la->la_bm_off) +
- start + bits_wanted);
- mlog(0, "Checking %llu against %llu\n",
- (unsigned long long)block_off,
- (unsigned long long)ac->ac_max_block);
- if (block_off > ac->ac_max_block)
- return 0;
-
- return 1;
-}
-
/*
* make sure we've got at least bits_wanted contiguous bits in the
* local alloc. You lose them when you drop i_mutex.
@@ -613,17 +709,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
mlog(0, "Calling in_range for max block %llu\n",
(unsigned long long)ac->ac_max_block);
- if (!ocfs2_local_alloc_in_range(local_alloc_inode, ac,
- bits_wanted)) {
- /*
- * The window is outside ac->ac_max_block.
- * This errno tells the caller to keep localalloc enabled
- * but to get the allocation from the main bitmap.
- */
- status = -EFBIG;
- goto bail;
- }
-
ac->ac_inode = local_alloc_inode;
/* We should never use localalloc from another slot */
ac->ac_alloc_slot = osb->slot_num;
@@ -664,7 +749,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
- start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted);
+ start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
+ ac->ac_resv);
if (start == -1) {
/* TODO: Shouldn't we just BUG here? */
status = -ENOSPC;
@@ -674,8 +760,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
bitmap = la->la_bitmap;
*bit_off = le32_to_cpu(la->la_bm_off) + start;
- /* local alloc is always contiguous by nature -- we never
- * delete bits from it! */
*num_bits = bits_wanted;
status = ocfs2_journal_access_di(handle,
@@ -687,18 +771,15 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
goto bail;
}
+ ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
+ bits_wanted);
+
while(bits_wanted--)
ocfs2_set_bit(start++, bitmap);
le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
+ ocfs2_journal_dirty(handle, osb->local_alloc_bh);
- status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- status = 0;
bail:
mlog_exit(status);
return status;
@@ -722,13 +803,17 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
}
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
- struct ocfs2_dinode *alloc,
- u32 numbits)
+ struct ocfs2_dinode *alloc,
+ u32 *numbits,
+ struct ocfs2_alloc_reservation *resv)
{
int numfound, bitoff, left, startoff, lastzero;
+ int local_resv = 0;
+ struct ocfs2_alloc_reservation r;
void *bitmap = NULL;
+ struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
- mlog_entry("(numbits wanted = %u)\n", numbits);
+ mlog_entry("(numbits wanted = %u)\n", *numbits);
if (!alloc->id1.bitmap1.i_total) {
mlog(0, "No bits in my window!\n");
@@ -736,6 +821,30 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
goto bail;
}
+ if (!resv) {
+ local_resv = 1;
+ ocfs2_resv_init_once(&r);
+ ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
+ resv = &r;
+ }
+
+ numfound = *numbits;
+ if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
+ if (numfound < *numbits)
+ *numbits = numfound;
+ goto bail;
+ }
+
+ /*
+ * Code error. While reservations are enabled, local
+ * allocation should _always_ go through them.
+ */
+ BUG_ON(osb->osb_resv_level != 0);
+
+ /*
+ * Reservations are disabled. Handle this the old way.
+ */
+
bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
numfound = bitoff = startoff = 0;
@@ -761,7 +870,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
startoff = bitoff+1;
}
/* we got everything we needed */
- if (numfound == numbits) {
+ if (numfound == *numbits) {
/* mlog(0, "Found it all!\n"); */
break;
}
@@ -770,12 +879,15 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff,
numfound);
- if (numfound == numbits)
+ if (numfound == *numbits)
bitoff = startoff - numfound;
else
bitoff = -1;
bail:
+ if (local_resv)
+ ocfs2_resv_discard(resmap, resv);
+
mlog_exit(bitoff);
return bitoff;
}
@@ -1049,7 +1161,7 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
/* we used the generic suballoc reserve function, but we set
* everything up nicely, so there's no reason why we can't use
* the more specific cluster api to claim bits. */
- status = ocfs2_claim_clusters(osb, handle, ac, osb->local_alloc_bits,
+ status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
&cluster_off, &cluster_count);
if (status == -ENOSPC) {
retry_enospc:
@@ -1063,7 +1175,7 @@ retry_enospc:
goto bail;
ac->ac_bits_wanted = osb->local_alloc_default_bits;
- status = ocfs2_claim_clusters(osb, handle, ac,
+ status = ocfs2_claim_clusters(handle, ac,
osb->local_alloc_bits,
&cluster_off,
&cluster_count);
@@ -1098,6 +1210,9 @@ retry_enospc:
memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
le16_to_cpu(la->la_size));
+ ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
+ OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
+
mlog(0, "New window allocated:\n");
mlog(0, "window la_bm_off = %u\n",
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
@@ -1169,12 +1284,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
}
ocfs2_clear_local_alloc(alloc);
-
- status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, osb->local_alloc_bh);
status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
main_bm_inode, main_bm_bh);
@@ -1192,7 +1302,6 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
atomic_inc(&osb->alloc_stats.moves);
- status = 0;
bail:
if (handle)
ocfs2_commit_trans(osb, handle);
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index ac5ea9f8..1be9b58 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -30,6 +30,9 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb);
void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb);
+void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb);
+unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb);
+
int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
int node_num,
struct ocfs2_dinode **alloc_copy);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 7898bd3..af2b8fe 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -41,44 +41,20 @@
#include "file.h"
#include "inode.h"
#include "mmap.h"
+#include "super.h"
-static inline int ocfs2_vm_op_block_sigs(sigset_t *blocked, sigset_t *oldset)
-{
- /* The best way to deal with signals in the vm path is
- * to block them upfront, rather than allowing the
- * locking paths to return -ERESTARTSYS. */
- sigfillset(blocked);
-
- /* We should technically never get a bad return value
- * from sigprocmask */
- return sigprocmask(SIG_BLOCK, blocked, oldset);
-}
-
-static inline int ocfs2_vm_op_unblock_sigs(sigset_t *oldset)
-{
- return sigprocmask(SIG_SETMASK, oldset, NULL);
-}
static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
- sigset_t blocked, oldset;
- int error, ret;
+ sigset_t oldset;
+ int ret;
mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);
- error = ocfs2_vm_op_block_sigs(&blocked, &oldset);
- if (error < 0) {
- mlog_errno(error);
- ret = VM_FAULT_SIGBUS;
- goto out;
- }
-
+ ocfs2_block_signals(&oldset);
ret = filemap_fault(area, vmf);
+ ocfs2_unblock_signals(&oldset);
- error = ocfs2_vm_op_unblock_sigs(&oldset);
- if (error < 0)
- mlog_errno(error);
-out:
mlog_exit_ptr(vmf->page);
return ret;
}
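The ocfs2_block_signals()/ocfs2_unblock_signals() pair is added to super.c elsewhere in this series (hence the new "super.h" include above); its body is not shown in this hunk. A sketch, assuming it wraps the same sigprocmask() calls as the removed static helpers, minus the unused return value:

    /* Hedged sketch, presumed from the removed
     * ocfs2_vm_op_{block,unblock}_sigs() helpers above. */
    void ocfs2_block_signals(sigset_t *oldset)
    {
            sigset_t blocked;

            sigfillset(&blocked);
            /* blocking a full set should never fail */
            sigprocmask(SIG_BLOCK, &blocked, oldset);
    }

    void ocfs2_unblock_signals(sigset_t *oldset)
    {
            sigprocmask(SIG_SETMASK, oldset, NULL);
    }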
@@ -158,14 +134,10 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page = vmf->page;
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
struct buffer_head *di_bh = NULL;
- sigset_t blocked, oldset;
- int ret, ret2;
+ sigset_t oldset;
+ int ret;
- ret = ocfs2_vm_op_block_sigs(&blocked, &oldset);
- if (ret < 0) {
- mlog_errno(ret);
- return ret;
- }
+ ocfs2_block_signals(&oldset);
/*
* The cluster locks taken will block a truncate from another
@@ -193,9 +165,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
ocfs2_inode_unlock(inode, 1);
out:
- ret2 = ocfs2_vm_op_unblock_sigs(&oldset);
- if (ret2 < 0)
- mlog_errno(ret2);
+ ocfs2_unblock_signals(&oldset);
if (ret)
ret = VM_FAULT_SIGBUS;
return ret;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index b1eb50a..db5dd3e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -239,6 +239,8 @@ static int ocfs2_mknod(struct inode *dir,
};
int did_quota_inode = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
+ sigset_t oldset;
+ int did_block_signals = 0;
mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode,
(unsigned long)dev, dentry->d_name.len,
@@ -350,6 +352,10 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
+ /* Starting to change things, restart is no longer possible. */
+ ocfs2_block_signals(&oldset);
+ did_block_signals = 1;
+
status = dquot_alloc_inode(inode);
if (status)
goto leave;
@@ -384,11 +390,7 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
ocfs2_add_links_count(dirfe, 1);
- status = ocfs2_journal_dirty(handle, parent_fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, parent_fe_bh);
inc_nlink(dir);
}
@@ -408,23 +410,28 @@ static int ocfs2_mknod(struct inode *dir,
}
}
- status = ocfs2_add_entry(handle, dentry, inode,
- OCFS2_I(inode)->ip_blkno, parent_fe_bh,
- &lookup);
- if (status < 0) {
+ /*
+ * Do this before adding the entry to the directory. We also
+ * set d_op after success so that ->d_iput() will clean up
+ * the dentry lock even if ocfs2_add_entry() fails below.
+ */
+ status = ocfs2_dentry_attach_lock(dentry, inode,
+ OCFS2_I(dir)->ip_blkno);
+ if (status) {
mlog_errno(status);
goto leave;
}
+ dentry->d_op = &ocfs2_dentry_ops;
- status = ocfs2_dentry_attach_lock(dentry, inode,
- OCFS2_I(dir)->ip_blkno);
- if (status) {
+ status = ocfs2_add_entry(handle, dentry, inode,
+ OCFS2_I(inode)->ip_blkno, parent_fe_bh,
+ &lookup);
+ if (status < 0) {
mlog_errno(status);
goto leave;
}
insert_inode_hash(inode);
- dentry->d_op = &ocfs2_dentry_ops;
d_instantiate(dentry, inode);
status = 0;
leave:
@@ -434,6 +441,8 @@ leave:
ocfs2_commit_trans(osb, handle);
ocfs2_inode_unlock(dir, 1);
+ if (did_block_signals)
+ ocfs2_unblock_signals(&oldset);
if (status == -ENOSPC)
mlog(0, "Disk is full\n");
@@ -445,11 +454,6 @@ leave:
ocfs2_free_dir_lookup_result(&lookup);
- if ((status < 0) && inode) {
- clear_nlink(inode);
- iput(inode);
- }
-
if (inode_ac)
ocfs2_free_alloc_context(inode_ac);
@@ -459,6 +463,17 @@ leave:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
+ /*
+ * We should call iput() after the i_mutex of the bitmap has
+ * been unlocked in ocfs2_free_alloc_context(), or
+ * ocfs2_delete_inode() will mutex_lock it again.
+ */
+ if ((status < 0) && inode) {
+ OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
+ clear_nlink(inode);
+ iput(inode);
+ }
+
mlog_exit(status);
return status;
@@ -476,14 +491,15 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
int status = 0;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_extent_list *fel;
- u64 fe_blkno = 0;
+ u64 suballoc_loc, fe_blkno = 0;
u16 suballoc_bit;
u16 feat;
*new_fe_bh = NULL;
- status = ocfs2_claim_new_inode(osb, handle, dir, parent_fe_bh,
- inode_ac, &suballoc_bit, &fe_blkno);
+ status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+ inode_ac, &suballoc_loc,
+ &suballoc_bit, &fe_blkno);
if (status < 0) {
mlog_errno(status);
goto leave;
@@ -520,6 +536,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
fe->i_generation = cpu_to_le32(inode->i_generation);
fe->i_fs_generation = cpu_to_le32(osb->fs_generation);
fe->i_blkno = cpu_to_le64(fe_blkno);
+ fe->i_suballoc_loc = cpu_to_le64(suballoc_loc);
fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
fe->i_uid = cpu_to_le32(inode->i_uid);
@@ -556,11 +573,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
}
- status = ocfs2_journal_dirty(handle, *new_fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, *new_fe_bh);
ocfs2_populate_inode(inode, fe, 1);
ocfs2_ci_set_new(osb, INODE_CACHE(inode));
@@ -626,6 +639,7 @@ static int ocfs2_link(struct dentry *old_dentry,
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dir_lookup_result lookup = { NULL, };
+ sigset_t oldset;
mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino,
old_dentry->d_name.len, old_dentry->d_name.name,
@@ -682,6 +696,9 @@ static int ocfs2_link(struct dentry *old_dentry,
goto out_unlock_inode;
}
+ /* Starting to change things, restart is no longer possible. */
+ ocfs2_block_signals(&oldset);
+
err = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (err < 0) {
@@ -694,14 +711,7 @@ static int ocfs2_link(struct dentry *old_dentry,
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
-
- err = ocfs2_journal_dirty(handle, fe_bh);
- if (err < 0) {
- ocfs2_add_links_count(fe, -1);
- drop_nlink(inode);
- mlog_errno(err);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
OCFS2_I(inode)->ip_blkno,
@@ -725,6 +735,7 @@ static int ocfs2_link(struct dentry *old_dentry,
out_commit:
ocfs2_commit_trans(osb, handle);
+ ocfs2_unblock_signals(&oldset);
out_unlock_inode:
ocfs2_inode_unlock(inode, 1);
@@ -898,12 +909,7 @@ static int ocfs2_unlink(struct inode *dir,
drop_nlink(inode);
drop_nlink(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
-
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, fe_bh);
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
if (S_ISDIR(inode->i_mode))
@@ -1321,12 +1327,7 @@ static int ocfs2_rename(struct inode *old_dir,
ocfs2_set_links_count(newfe, 0);
else
ocfs2_add_links_count(newfe, -1);
-
- status = ocfs2_journal_dirty(handle, newfe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, newfe_bh);
} else {
/* if the name was not found in new_dir, add it now */
status = ocfs2_add_entry(handle, new_dentry, old_inode,
@@ -1345,10 +1346,7 @@ static int ocfs2_rename(struct inode *old_dir,
old_di->i_ctime = cpu_to_le64(old_inode->i_ctime.tv_sec);
old_di->i_ctime_nsec = cpu_to_le32(old_inode->i_ctime.tv_nsec);
-
- status = ocfs2_journal_dirty(handle, old_inode_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, old_inode_bh);
} else
mlog_errno(status);
@@ -1420,7 +1418,7 @@ static int ocfs2_rename(struct inode *old_dir,
OCFS2_JOURNAL_ACCESS_WRITE);
fe = (struct ocfs2_dinode *) old_dir_bh->b_data;
ocfs2_set_links_count(fe, old_dir->i_nlink);
- status = ocfs2_journal_dirty(handle, old_dir_bh);
+ ocfs2_journal_dirty(handle, old_dir_bh);
}
}
ocfs2_dentry_move(old_dentry, new_dentry, old_dir, new_dir);
@@ -1552,11 +1550,7 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
(bytes_left > sb->s_blocksize) ? sb->s_blocksize :
bytes_left);
- status = ocfs2_journal_dirty(handle, bhs[virtual]);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, bhs[virtual]);
virtual++;
p_blkno++;
@@ -1600,6 +1594,8 @@ static int ocfs2_symlink(struct inode *dir,
};
int did_quota = 0, did_quota_inode = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
+ sigset_t oldset;
+ int did_block_signals = 0;
mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir,
dentry, symname, dentry->d_name.len, dentry->d_name.name);
@@ -1695,6 +1691,10 @@ static int ocfs2_symlink(struct inode *dir,
goto bail;
}
+ /* Starting to change things, restart is no longer possible. */
+ ocfs2_block_signals(&oldset);
+ did_block_signals = 1;
+
status = dquot_alloc_inode(inode);
if (status)
goto bail;
@@ -1771,22 +1771,27 @@ static int ocfs2_symlink(struct inode *dir,
}
}
- status = ocfs2_add_entry(handle, dentry, inode,
- le64_to_cpu(fe->i_blkno), parent_fe_bh,
- &lookup);
- if (status < 0) {
+ /*
+ * Do this before adding the entry to the directory. We also
+ * set d_op after success so that ->d_iput() will clean up
+ * the dentry lock even if ocfs2_add_entry() fails below.
+ */
+ status = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno);
+ if (status) {
mlog_errno(status);
goto bail;
}
+ dentry->d_op = &ocfs2_dentry_ops;
- status = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno);
- if (status) {
+ status = ocfs2_add_entry(handle, dentry, inode,
+ le64_to_cpu(fe->i_blkno), parent_fe_bh,
+ &lookup);
+ if (status < 0) {
mlog_errno(status);
goto bail;
}
insert_inode_hash(inode);
- dentry->d_op = &ocfs2_dentry_ops;
d_instantiate(dentry, inode);
bail:
if (status < 0 && did_quota)
@@ -1798,6 +1803,8 @@ bail:
ocfs2_commit_trans(osb, handle);
ocfs2_inode_unlock(dir, 1);
+ if (did_block_signals)
+ ocfs2_unblock_signals(&oldset);
brelse(new_fe_bh);
brelse(parent_fe_bh);
@@ -1811,6 +1818,7 @@ bail:
if (xattr_ac)
ocfs2_free_alloc_context(xattr_ac);
if ((status < 0) && inode) {
+ OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
clear_nlink(inode);
iput(inode);
}
@@ -1944,12 +1952,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
if (S_ISDIR(inode->i_mode))
ocfs2_add_links_count(orphan_fe, 1);
orphan_dir_inode->i_nlink = ocfs2_read_links_count(orphan_fe);
-
- status = ocfs2_journal_dirty(handle, orphan_dir_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, orphan_dir_bh);
status = __ocfs2_add_entry(handle, orphan_dir_inode, name,
OCFS2_ORPHAN_NAMELEN, inode,
@@ -1976,6 +1979,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
}
le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL);
+ OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR;
/* Record which orphan dir our inode now resides
* in. delete_inode will use this to determine which orphan
@@ -2047,12 +2051,7 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
if (S_ISDIR(inode->i_mode))
ocfs2_add_links_count(orphan_fe, -1);
orphan_dir_inode->i_nlink = ocfs2_read_links_count(orphan_fe);
-
- status = ocfs2_journal_dirty(handle, orphan_dir_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, orphan_dir_bh);
leave:
ocfs2_free_dir_lookup_result(&lookup);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index adf5e2e..c67003b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -47,6 +47,7 @@
/* For struct ocfs2_blockcheck_stats */
#include "blockcheck.h"
+#include "reservations.h"
/* Caching of metadata buffers */
@@ -341,6 +342,9 @@ struct ocfs2_super
*/
unsigned int local_alloc_bits;
unsigned int local_alloc_default_bits;
+ /* osb_clusters_at_boot can become stale! Do not trust it to
+ * be up to date. */
+ unsigned int osb_clusters_at_boot;
enum ocfs2_local_alloc_state local_alloc_state; /* protected
* by osb_lock */
@@ -349,6 +353,11 @@ struct ocfs2_super
u64 la_last_gd;
+ struct ocfs2_reservation_map osb_la_resmap;
+
+ unsigned int osb_resv_level;
+ unsigned int osb_dir_resv_level;
+
/* Next three fields are for local node slot recovery during
* mount. */
int dirty;
@@ -482,6 +491,13 @@ static inline int ocfs2_supports_indexed_dirs(struct ocfs2_super *osb)
return 0;
}
+static inline int ocfs2_supports_discontig_bg(struct ocfs2_super *osb)
+{
+ if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG)
+ return 1;
+ return 0;
+}
+
static inline unsigned int ocfs2_link_max(struct ocfs2_super *osb)
{
if (ocfs2_supports_indexed_dirs(osb))
@@ -763,6 +779,12 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
}
+static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
+ unsigned int clusters)
+{
+ return clusters >> (20 - OCFS2_SB(sb)->s_clustersize_bits);
+}
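A quick worked example of the conversion pair (assuming a 4K cluster size, so s_clustersize_bits == 12 and the shift is 20 - 12 = 8):

    /* 4K clusters: 1MB == 256 clusters. The right shift rounds
     * down, so the pair is not a lossless round trip. */
    unsigned int megs = ocfs2_clusters_to_megabytes(sb, 1000); /* 3 */
    unsigned int back = ocfs2_megabytes_to_clusters(sb, megs); /* 768 */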
+
static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
ext2_set_bit(bit, bitmap);
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index bb37218..33f1c9a 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -100,7 +100,8 @@
| OCFS2_FEATURE_INCOMPAT_XATTR \
| OCFS2_FEATURE_INCOMPAT_META_ECC \
| OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \
- | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE)
+ | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \
+ | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG)
#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \
| OCFS2_FEATURE_RO_COMPAT_USRQUOTA \
| OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
@@ -165,6 +166,9 @@
/* Refcount tree support */
#define OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE 0x1000
+/* Discontiguous block groups */
+#define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG 0x2000
+
/*
* backup superblock flag is used to indicate that this volume
* has backup superblocks.
@@ -283,14 +287,6 @@
#define OCFS2_MIN_JOURNAL_SIZE (4 * 1024 * 1024)
/*
- * Default local alloc size (in megabytes)
- *
- * The value chosen should be such that most allocations, including new
- * block groups, use local alloc.
- */
-#define OCFS2_DEFAULT_LOCAL_ALLOC_SIZE 8
-
-/*
* Inline extended attribute size (in bytes)
* The value chosen should be aligned to 16 byte boundaries.
*/
@@ -512,7 +508,10 @@ struct ocfs2_extent_block
block group */
__le32 h_fs_generation; /* Must match super block */
__le64 h_blkno; /* Offset on disk, in blocks */
-/*20*/ __le64 h_reserved3;
+/*20*/ __le64 h_suballoc_loc; /* Suballocator block group this
+ eb belongs to. Only valid
+ if allocated from a
+ discontiguous block group */
__le64 h_next_leaf_blk; /* Offset on disk, in blocks,
of next leaf header pointing
to data */
@@ -679,7 +678,11 @@ struct ocfs2_dinode {
/*80*/ struct ocfs2_block_check i_check; /* Error checking */
/*88*/ __le64 i_dx_root; /* Pointer to dir index root block */
/*90*/ __le64 i_refcount_loc;
- __le64 i_reserved2[4];
+ __le64 i_suballoc_loc; /* Suballocator block group this
+ inode belongs to. Only valid
+ if allocated from a
+ discontiguous block group */
+/*A0*/ __le64 i_reserved2[3];
/*B8*/ union {
__le64 i_pad1; /* Generic way to refer to this
64bit union */
@@ -814,7 +817,12 @@ struct ocfs2_dx_root_block {
__le32 dr_reserved2;
__le64 dr_free_blk; /* Pointer to head of free
* unindexed block list. */
- __le64 dr_reserved3[15];
+ __le64 dr_suballoc_loc; /* Suballocator block group
+ this root belongs to.
+ Only valid if allocated
+ from a discontiguous
+ block group */
+ __le64 dr_reserved3[14];
union {
struct ocfs2_extent_list dr_list; /* Keep this aligned to 128
* bits for maximum space
@@ -840,6 +848,13 @@ struct ocfs2_dx_leaf {
};
/*
+ * Largest bitmap for a block (suballocator) group in bytes. This limit
+ * does not affect cluster groups (global allocator). Cluster group
+ * bitmaps run to the end of the block.
+ */
+#define OCFS2_MAX_BG_BITMAP_SIZE 256
+
+/*
* On disk allocator group structure for OCFS2
*/
struct ocfs2_group_desc
@@ -860,7 +875,29 @@ struct ocfs2_group_desc
__le64 bg_blkno; /* Offset on disk, in blocks */
/*30*/ struct ocfs2_block_check bg_check; /* Error checking */
__le64 bg_reserved2;
-/*40*/ __u8 bg_bitmap[0];
+/*40*/ union {
+ __u8 bg_bitmap[0];
+ struct {
+ /*
+ * Block groups may be discontiguous when
+ * OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG is set.
+ * The extents of a discontiguous block group are
+ * stored in bg_list. It is a flat list.
+ * l_tree_depth must always be zero. A
+ * discontiguous group is signified by a non-zero
+ * bg_list->l_next_free_rec. Only block groups
+ * can be discontiguous; cluster groups cannot.
+ * We've never made a block group with more than
+ * 2048 blocks (256 bytes of bg_bitmap). This
+ * codifies that limit so that we can fit bg_list.
+ * bg_size of a discontiguous block group will
+ * be 256 to match bg_bitmap_filler.
+ */
+ __u8 bg_bitmap_filler[OCFS2_MAX_BG_BITMAP_SIZE];
+/*140*/ struct ocfs2_extent_list bg_list;
+ };
+ };
+/* Actual on-disk size is one block */
};
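Given the fixed offsets documented above (bg_bitmap at 0x40, bg_list at 0x140), the union layout could be verified at build time. These assertions are illustrative, not part of the patch:

    /* Sketch: layout invariants implied by the offset comments. */
    BUILD_BUG_ON(offsetof(struct ocfs2_group_desc, bg_bitmap) != 0x40);
    BUILD_BUG_ON(offsetof(struct ocfs2_group_desc, bg_list) !=
                 0x40 + OCFS2_MAX_BG_BITMAP_SIZE);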
struct ocfs2_refcount_rec {
@@ -905,7 +942,11 @@ struct ocfs2_refcount_block {
/*40*/ __le32 rf_generation; /* generation number. all be the same
* for the same refcount tree. */
__le32 rf_reserved0;
- __le64 rf_reserved1[7];
+ __le64 rf_suballoc_loc; /* Suballocator block group this
+ refcount block belongs to. Only
+ valid if allocated from a
+ discontiguous block group */
+/*50*/ __le64 rf_reserved1[6];
/*80*/ union {
struct ocfs2_refcount_list rf_records; /* List of refcount
records */
@@ -1017,7 +1058,10 @@ struct ocfs2_xattr_block {
real xattr or a xattr tree. */
__le16 xb_reserved0;
__le32 xb_reserved1;
- __le64 xb_reserved2;
+ __le64 xb_suballoc_loc; /* Suballocator block group this
+ xattr block belongs to. Only
+ valid if allocated from a
+ discontiguous block group */
/*30*/ union {
struct ocfs2_xattr_header xb_header; /* xattr header if this
block contains xattr */
@@ -1254,6 +1298,16 @@ static inline u16 ocfs2_extent_recs_per_eb(struct super_block *sb)
return size / sizeof(struct ocfs2_extent_rec);
}
+static inline u16 ocfs2_extent_recs_per_gd(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_group_desc, bg_list.l_recs);
+
+ return size / sizeof(struct ocfs2_extent_rec);
+}
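For scale, assuming the usual 16-byte struct ocfs2_extent_rec and a 16-byte extent list header at offset 0x140, a 4K block leaves a discontiguous group plenty of extent records:

    /* Illustrative arithmetic only; sizes are assumptions. */
    int recs = (4096 - (0x140 + 16)) / 16;  /* 235 records */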
+
static inline int ocfs2_dx_entries_per_leaf(struct super_block *sb)
{
int size;
@@ -1284,13 +1338,23 @@ static inline u16 ocfs2_local_alloc_size(struct super_block *sb)
return size;
}
-static inline int ocfs2_group_bitmap_size(struct super_block *sb)
+static inline int ocfs2_group_bitmap_size(struct super_block *sb,
+ int suballocator,
+ u32 feature_incompat)
{
- int size;
-
- size = sb->s_blocksize -
+ int size = sb->s_blocksize -
offsetof(struct ocfs2_group_desc, bg_bitmap);
+ /*
+ * The cluster allocator uses the entire block. Suballocators have
+ * never used more than OCFS2_MAX_BG_BITMAP_SIZE. Unfortunately, older
+ * code expects bg_size set to the maximum. Thus we must keep
+ * bg_size as-is unless discontig_bg is enabled.
+ */
+ if (suballocator &&
+ (feature_incompat & OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG))
+ size = OCFS2_MAX_BG_BITMAP_SIZE;
+
return size;
}
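A hedged demo of the new signature, plugging in a 4K block size (so blocksize minus the 0x40 bitmap offset is 4032):

    /* Worked demo of the three cases; values illustrative. */
    int cluster   = ocfs2_group_bitmap_size(sb, 0, 0);  /* 4032 */
    int old_sub   = ocfs2_group_bitmap_size(sb, 1, 0);  /* 4032, bg_size kept */
    int discontig = ocfs2_group_bitmap_size(sb, 1,
                            OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG);  /* 256 */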
@@ -1402,23 +1466,43 @@ static inline int ocfs2_extent_recs_per_eb(int blocksize)
return size / sizeof(struct ocfs2_extent_rec);
}
-static inline int ocfs2_local_alloc_size(int blocksize)
+static inline int ocfs2_extent_recs_per_gd(int blocksize)
{
int size;
size = blocksize -
- offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap);
+ offsetof(struct ocfs2_group_desc, bg_list.l_recs);
- return size;
+ return size / sizeof(struct ocfs2_extent_rec);
}
-static inline int ocfs2_group_bitmap_size(int blocksize)
+static inline int ocfs2_local_alloc_size(int blocksize)
{
int size;
size = blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap);
+
+ return size;
+}
+
+static inline int ocfs2_group_bitmap_size(int blocksize,
+ int suballocator,
+ uint32_t feature_incompat)
+{
+	int size = blocksize -
offsetof(struct ocfs2_group_desc, bg_bitmap);
+ /*
+ * The cluster allocator uses the entire block. Suballocators have
+ * never used more than OCFS2_MAX_BG_BITMAP_SIZE. Unfortunately, older
+ * code expects bg_size set to the maximum. Thus we must keep
+ * bg_size as-is unless discontig_bg is enabled.
+ */
+ if (suballocator &&
+ (feature_incompat & OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG))
+ size = OCFS2_MAX_BG_BITMAP_SIZE;
+
return size;
}
@@ -1491,5 +1575,19 @@ static inline void ocfs2_set_de_type(struct ocfs2_dir_entry *de,
de->file_type = ocfs2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
+static inline int ocfs2_gd_is_discontig(struct ocfs2_group_desc *gd)
+{
+ if ((offsetof(struct ocfs2_group_desc, bg_bitmap) +
+ le16_to_cpu(gd->bg_size)) !=
+ offsetof(struct ocfs2_group_desc, bg_list))
+ return 0;
+ /*
+ * Only valid to check l_next_free_rec if
+ * bg_bitmap + bg_size == bg_list.
+ */
+ if (!gd->bg_list.l_next_free_rec)
+ return 0;
+ return 1;
+}
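To see why the bg_size test above suffices, plug in the 4K-block numbers from the layout comments earlier in this file:

    /* Illustrative arithmetic behind ocfs2_gd_is_discontig():
     * contiguous group:     bg_size = 4096 - 0x40 = 4032,
     *                       0x40 + 4032 != 0x140 -> not discontig
     * discontiguous group:  bg_size = OCFS2_MAX_BG_BITMAP_SIZE = 256,
     *                       0x40 + 256 == 0x140 -> check bg_list
     */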
#endif /* _OCFS2_FS_H */
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 123bc52..196fcb5 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -23,6 +23,7 @@
struct ocfs2_dquot {
struct dquot dq_dquot; /* Generic VFS dquot */
loff_t dq_local_off; /* Offset in the local quota file */
+ u64 dq_local_phys_blk; /* Physical block carrying quota structure */
struct ocfs2_quota_chunk *dq_chunk; /* Chunk dquot is in */
unsigned int dq_use_count; /* Number of nodes having reference to this entry in global quota file */
s64 dq_origspace; /* Last globally synced space usage */
@@ -51,8 +52,9 @@ struct ocfs2_mem_dqinfo {
struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */
struct buffer_head *dqi_gqi_bh; /* Buffer head with global quota file inode - set only if inode lock is obtained */
int dqi_gqi_count; /* Number of holders of dqi_gqi_bh */
+ u64 dqi_giblk; /* Number of block with global information header */
struct buffer_head *dqi_lqi_bh; /* Buffer head with local quota file inode */
- struct buffer_head *dqi_ibh; /* Buffer with information header */
+ struct buffer_head *dqi_libh; /* Buffer with local information header */
struct qtree_mem_dqinfo dqi_gi; /* Info about global file */
struct delayed_work dqi_sync_work; /* Work for syncing dquots */
struct ocfs2_quota_recovery *dqi_rec; /* Pointer to recovery
@@ -102,8 +104,12 @@ static inline int ocfs2_global_release_dquot(struct dquot *dquot)
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
-int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
- struct buffer_head **bh);
+int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh);
+int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
+ struct buffer_head **bh);
+int ocfs2_create_local_dquot(struct dquot *dquot);
+int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot);
+int ocfs2_local_write_dquot(struct dquot *dquot);
extern const struct dquot_operations ocfs2_quota_operations;
extern struct quota_format_type ocfs2_quota_format;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index ab42a74..2bb35fe 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -25,8 +25,44 @@
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
+#include "buffer_head_io.h"
#include "quota.h"
+/*
+ * Locking of quotas with OCFS2 is rather complex. Here are rules that
+ * should be obeyed by all the functions:
+ * - any write of quota structure (either to local or global file) is protected
+ * by dqio_mutex or dquot->dq_lock.
+ * - any modification of global quota file holds inode cluster lock, i_mutex,
+ * and ip_alloc_sem of the global quota file (achieved by
+ * ocfs2_lock_global_qf). It also has to hold qinfo_lock.
+ * - an allocation of new blocks for local quota file is protected by
+ * its ip_alloc_sem
+ *
+ * A rough sketch of locking dependencies (lf = local file, gf = global file):
+ * Normal filesystem operation:
+ * start_trans -> dqio_mutex -> write to lf
+ * Syncing of local and global file:
+ * ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
+ * write to gf
+ * -> write to lf
+ * Acquire dquot for the first time:
+ * dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
+ * -> alloc space for gf
+ * -> start_trans -> qinfo_lock -> write to gf
+ * -> ip_alloc_sem of lf -> alloc space for lf
+ * -> write to lf
+ * Release last reference to dquot:
+ * dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
+ * -> write to lf
+ * Note that all the above operations also hold the inode cluster lock of lf.
+ * Recovery:
+ * inode cluster lock of recovered lf
+ * -> read bitmaps -> ip_alloc_sem of lf
+ * -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
+ * write to gf
+ */
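Concretely, the "syncing of local and global file" rule corresponds to the acquisition order visible in ocfs2_sync_dquot_helper() later in this patch; a condensed sketch with error paths elided and handle setup shown schematically:

    /* Hedged sketch: lock order when syncing lf with gf. */
    status = ocfs2_lock_global_qf(oinfo, 1);
    handle = ocfs2_start_trans(osb, credits);
    mutex_lock(&sb_dqopt(sb)->dqio_mutex);
    status = ocfs2_sync_dquot(dquot);          /* qinfo_lock, write gf */
    status = ocfs2_local_write_dquot(dquot);   /* write lf */
    mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
    ocfs2_commit_trans(osb, handle);
    ocfs2_unlock_global_qf(oinfo, 1);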
+
static struct workqueue_struct *ocfs2_quota_wq = NULL;
static void qsync_work_fn(struct work_struct *work);
@@ -91,8 +127,7 @@ struct qtree_fmt_operations ocfs2_global_ops = {
.is_id = ocfs2_global_is_id,
};
-static int ocfs2_validate_quota_block(struct super_block *sb,
- struct buffer_head *bh)
+int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
struct ocfs2_disk_dqtrailer *dqt =
ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
@@ -110,54 +145,19 @@ static int ocfs2_validate_quota_block(struct super_block *sb,
return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
-int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
- struct buffer_head **bh)
+int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
+ struct buffer_head **bhp)
{
- int rc = 0;
- struct buffer_head *tmp = *bh;
-
- if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
- ocfs2_error(inode->i_sb,
- "Quota file %llu is probably corrupted! Requested "
- "to read block %Lu but file has size only %Lu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)v_block,
- (unsigned long long)i_size_read(inode));
- return -EIO;
- }
- rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
- ocfs2_validate_quota_block);
+ int rc;
+
+ *bhp = NULL;
+ rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
+ ocfs2_validate_quota_block);
if (rc)
mlog_errno(rc);
-
- /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
- if (!rc && !*bh)
- *bh = tmp;
-
return rc;
}
-static int ocfs2_get_quota_block(struct inode *inode, int block,
- struct buffer_head **bh)
-{
- u64 pblock, pcount;
- int err;
-
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
- err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
- if (err) {
- mlog_errno(err);
- return err;
- }
- *bh = sb_getblk(inode->i_sb, pblock);
- if (!*bh) {
- err = -EIO;
- mlog_errno(err);
- }
- return err;
-}
-
/* Read data from global quotafile - avoid pagecache and such because we cannot
* afford acquiring the locks... We use quota cluster lock to serialize
* operations. Caller is responsible for acquiring it. */
@@ -172,6 +172,7 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
int err = 0;
struct buffer_head *bh;
size_t toread, tocopy;
+ u64 pblock = 0, pcount = 0;
if (off > i_size)
return 0;
@@ -180,8 +181,19 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
toread = len;
while (toread > 0) {
tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
+ if (!pcount) {
+ err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
+ &pcount, NULL);
+ if (err) {
+ mlog_errno(err);
+ return err;
+ }
+ } else {
+ pcount--;
+ pblock++;
+ }
bh = NULL;
- err = ocfs2_read_quota_block(gqinode, blk, &bh);
+ err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
if (err) {
mlog_errno(err);
return err;
@@ -209,6 +221,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
int err = 0, new = 0, ja_type;
struct buffer_head *bh = NULL;
handle_t *handle = journal_current_handle();
+ u64 pblock, pcount;
if (!handle) {
mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
@@ -221,12 +234,11 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
}
- mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
if (gqinode->i_size < off + len) {
loff_t rounded_end =
ocfs2_align_bytes_to_blocks(sb, off + len);
- /* Space is already allocated in ocfs2_global_read_dquot() */
+ /* Space is already allocated in ocfs2_acquire_dquot() */
err = ocfs2_simple_size_update(gqinode,
oinfo->dqi_gqi_bh,
rounded_end);
@@ -234,13 +246,20 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
goto out;
new = 1;
}
+ err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
+ if (err) {
+ mlog_errno(err);
+ goto out;
+ }
/* Not rewriting whole block? */
if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
!new) {
- err = ocfs2_read_quota_block(gqinode, blk, &bh);
+ err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
} else {
- err = ocfs2_get_quota_block(gqinode, blk, &bh);
+ bh = sb_getblk(sb, pblock);
+ if (!bh)
+ err = -ENOMEM;
ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
}
if (err) {
@@ -261,19 +280,15 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
brelse(bh);
goto out;
}
- err = ocfs2_journal_dirty(handle, bh);
+ ocfs2_journal_dirty(handle, bh);
brelse(bh);
- if (err < 0)
- goto out;
out:
if (err) {
- mutex_unlock(&gqinode->i_mutex);
mlog_errno(err);
return err;
}
gqinode->i_version++;
ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
- mutex_unlock(&gqinode->i_mutex);
return len;
}
@@ -291,11 +306,23 @@ int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
else
WARN_ON(bh != oinfo->dqi_gqi_bh);
spin_unlock(&dq_data_lock);
+ if (ex) {
+ mutex_lock(&oinfo->dqi_gqinode->i_mutex);
+ down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+ } else {
+ down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+ }
return 0;
}
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
+ if (ex) {
+ up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+ mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
+ } else {
+ up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
+ }
ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
brelse(oinfo->dqi_gqi_bh);
spin_lock(&dq_data_lock);
@@ -313,6 +340,7 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
struct ocfs2_global_disk_dqinfo dinfo;
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
+ u64 pcount;
int status;
mlog_entry_void();
@@ -339,9 +367,19 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
mlog_errno(status);
goto out_err;
}
+
+ status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
+ &pcount, NULL);
+ if (status < 0)
+ goto out_unlock;
+
+ status = ocfs2_qinfo_lock(oinfo, 0);
+ if (status < 0)
+ goto out_unlock;
status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct ocfs2_global_disk_dqinfo),
OCFS2_GLOBAL_INFO_OFF);
+ ocfs2_qinfo_unlock(oinfo, 0);
ocfs2_unlock_global_qf(oinfo, 0);
if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
@@ -368,6 +406,10 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
out_err:
mlog_exit(status);
return status;
+out_unlock:
+ ocfs2_unlock_global_qf(oinfo, 0);
+ mlog_errno(status);
+ goto out_err;
}
/* Write information to global quota file. Expects exclusive lock on quota
@@ -426,78 +468,10 @@ static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
- /* We modify all the allocated blocks, tree root, and info block */
+ /* We modify all the allocated blocks, tree root, info block and
+ * the inode */
return (ocfs2_global_qinit_alloc(sb, type) + 2) *
- OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
-}
-
-/* Read in information from global quota file and acquire a reference to it.
- * dquot_acquire() has already started the transaction and locked quota file */
-int ocfs2_global_read_dquot(struct dquot *dquot)
-{
- int err, err2, ex = 0;
- struct super_block *sb = dquot->dq_sb;
- int type = dquot->dq_type;
- struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
- struct ocfs2_super *osb = OCFS2_SB(sb);
- struct inode *gqinode = info->dqi_gqinode;
- int need_alloc = ocfs2_global_qinit_alloc(sb, type);
- handle_t *handle = NULL;
-
- err = ocfs2_qinfo_lock(info, 0);
- if (err < 0)
- goto out;
- err = qtree_read_dquot(&info->dqi_gi, dquot);
- if (err < 0)
- goto out_qlock;
- OCFS2_DQUOT(dquot)->dq_use_count++;
- OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
- OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
- ocfs2_qinfo_unlock(info, 0);
-
- if (!dquot->dq_off) { /* No real quota entry? */
- ex = 1;
- /*
- * Add blocks to quota file before we start a transaction since
- * locking allocators ranks above a transaction start
- */
- WARN_ON(journal_current_handle());
- down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
- err = ocfs2_extend_no_holes(gqinode,
- gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
- gqinode->i_size);
- up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
- if (err < 0)
- goto out;
- }
-
- handle = ocfs2_start_trans(osb,
- ocfs2_calc_global_qinit_credits(sb, type));
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- goto out;
- }
- err = ocfs2_qinfo_lock(info, ex);
- if (err < 0)
- goto out_trans;
- err = qtree_write_dquot(&info->dqi_gi, dquot);
- if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
- err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
- if (!err)
- err = err2;
- }
-out_qlock:
- if (ex)
- ocfs2_qinfo_unlock(info, 1);
- else
- ocfs2_qinfo_unlock(info, 0);
-out_trans:
- if (handle)
- ocfs2_commit_trans(osb, handle);
-out:
- if (err < 0)
- mlog_errno(err);
- return err;
+ OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}
/* Sync local information about quota modifications with global quota file.
@@ -638,14 +612,13 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
}
mutex_lock(&sb_dqopt(sb)->dqio_mutex);
status = ocfs2_sync_dquot(dquot);
- mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
if (status < 0)
mlog_errno(status);
/* We have to write local structure as well... */
- dquot_mark_dquot_dirty(dquot);
- status = dquot_commit(dquot);
+ status = ocfs2_local_write_dquot(dquot);
if (status < 0)
mlog_errno(status);
+ mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
@@ -684,7 +657,9 @@ static int ocfs2_write_dquot(struct dquot *dquot)
mlog_errno(status);
goto out;
}
- status = dquot_commit(dquot);
+ mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
+ status = ocfs2_local_write_dquot(dquot);
+ mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out:
mlog_exit(status);
@@ -715,6 +690,10 @@ static int ocfs2_release_dquot(struct dquot *dquot)
mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
+ mutex_lock(&dquot->dq_lock);
+ /* Check whether we are not racing with some other dqget() */
+ if (atomic_read(&dquot->dq_count) > 1)
+ goto out;
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
@@ -725,30 +704,113 @@ static int ocfs2_release_dquot(struct dquot *dquot)
mlog_errno(status);
goto out_ilock;
}
- status = dquot_release(dquot);
+
+ status = ocfs2_global_release_dquot(dquot);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_trans;
+ }
+ status = ocfs2_local_release_dquot(handle, dquot);
+ /*
+ * If we fail here, we cannot do much as global structure is
+ * already released. So just complain...
+ */
+ if (status < 0)
+ mlog_errno(status);
+ clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_trans:
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
+ mutex_unlock(&dquot->dq_lock);
mlog_exit(status);
return status;
}
+/*
+ * Read global dquot structure from disk or create it if it does
+ * not exist. Also update use count of the global structure and
+ * create structure in node-local quota file.
+ */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
- struct ocfs2_mem_dqinfo *oinfo =
- sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
- int status = 0;
+ int status = 0, err;
+ int ex = 0;
+ struct super_block *sb = dquot->dq_sb;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+ int type = dquot->dq_type;
+ struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
+ struct inode *gqinode = info->dqi_gqinode;
+ int need_alloc = ocfs2_global_qinit_alloc(sb, type);
+ handle_t *handle;
- mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
- /* We need an exclusive lock, because we're going to update use count
- * and instantiate possibly new dquot structure */
- status = ocfs2_lock_global_qf(oinfo, 1);
+ mlog_entry("id=%u, type=%d", dquot->dq_id, type);
+ mutex_lock(&dquot->dq_lock);
+ /*
+ * We need an exclusive lock, because we're going to update use count
+ * and instantiate possibly new dquot structure
+ */
+ status = ocfs2_lock_global_qf(info, 1);
if (status < 0)
goto out;
- status = dquot_acquire(dquot);
- ocfs2_unlock_global_qf(oinfo, 1);
+ if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
+ status = ocfs2_qinfo_lock(info, 0);
+ if (status < 0)
+ goto out_dq;
+ status = qtree_read_dquot(&info->dqi_gi, dquot);
+ ocfs2_qinfo_unlock(info, 0);
+ if (status < 0)
+ goto out_dq;
+ }
+ set_bit(DQ_READ_B, &dquot->dq_flags);
+
+ OCFS2_DQUOT(dquot)->dq_use_count++;
+ OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
+ OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
+ if (!dquot->dq_off) { /* No real quota entry? */
+ ex = 1;
+ /*
+ * Add blocks to quota file before we start a transaction since
+ * locking allocators ranks above a transaction start
+ */
+ WARN_ON(journal_current_handle());
+ status = ocfs2_extend_no_holes(gqinode,
+ gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
+ gqinode->i_size);
+ if (status < 0)
+ goto out_dq;
+ }
+
+ handle = ocfs2_start_trans(osb,
+ ocfs2_calc_global_qinit_credits(sb, type));
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ goto out_dq;
+ }
+ status = ocfs2_qinfo_lock(info, ex);
+ if (status < 0)
+ goto out_trans;
+ status = qtree_write_dquot(&info->dqi_gi, dquot);
+ if (ex && info_dirty(sb_dqinfo(sb, type))) {
+ err = __ocfs2_global_write_info(sb, type);
+ if (!status)
+ status = err;
+ }
+ ocfs2_qinfo_unlock(info, ex);
+out_trans:
+ ocfs2_commit_trans(osb, handle);
+out_dq:
+ ocfs2_unlock_global_qf(info, 1);
+ if (status < 0)
+ goto out;
+
+ status = ocfs2_create_local_dquot(dquot);
+ if (status < 0)
+ goto out;
+ set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
+ mutex_unlock(&dquot->dq_lock);
mlog_exit(status);
return status;
}
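The rewritten acquire path follows the "Acquire dquot for the first time" ordering from the locking comment at the top of quota_global.c; a condensed view of the steps implemented above:

    /* Condensed flow of ocfs2_acquire_dquot():
     * 1. mutex_lock(&dquot->dq_lock)
     * 2. ocfs2_lock_global_qf(info, 1)
     * 3. first read: qinfo_lock -> qtree_read_dquot -> qinfo_unlock
     * 4. no entry yet: ocfs2_extend_no_holes() before the transaction,
     *    since allocator locks rank above a transaction start
     * 5. start_trans -> qinfo_lock(ex) -> qtree_write_dquot -> commit
     * 6. unlock global qf, then ocfs2_create_local_dquot()
     */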
@@ -770,7 +832,6 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
struct ocfs2_super *osb = OCFS2_SB(sb);
mlog_entry("id=%u, type=%d", dquot->dq_id, type);
- dquot_mark_dquot_dirty(dquot);
/* In case user set some limits, sync dquot immediately to global
* quota file so that information propagates quicker */
@@ -793,14 +854,16 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
mlog_errno(status);
goto out_ilock;
}
+ mutex_lock(&sb_dqopt(sb)->dqio_mutex);
status = ocfs2_sync_dquot(dquot);
if (status < 0) {
mlog_errno(status);
- goto out_trans;
+ goto out_dlock;
}
/* Now write updated local dquot structure */
- status = dquot_commit(dquot);
-out_trans:
+ status = ocfs2_local_write_dquot(dquot);
+out_dlock:
+ mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
@@ -852,7 +915,7 @@ static void ocfs2_destroy_dquot(struct dquot *dquot)
}
const struct dquot_operations ocfs2_quota_operations = {
- .write_dquot = ocfs2_write_dquot,
+ /* We never make dquot dirty so .write_dquot is never called */
.acquire_dquot = ocfs2_acquire_dquot,
.release_dquot = ocfs2_release_dquot,
.mark_dirty = ocfs2_mark_dquot_dirty,
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 9ad4930..8bd70d4 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -22,6 +22,7 @@
#include "dlmglue.h"
#include "quota.h"
#include "uptodate.h"
+#include "super.h"
/* Number of local quota structures per block */
static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -119,12 +120,8 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
lock_buffer(bh);
modify(bh, private);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- ocfs2_commit_trans(OCFS2_SB(sb), handle);
- return status;
- }
+ ocfs2_journal_dirty(handle, bh);
+
status = ocfs2_commit_trans(OCFS2_SB(sb), handle);
if (status < 0) {
mlog_errno(status);
@@ -133,6 +130,39 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
return 0;
}
+/*
+ * Read quota block from a given logical offset.
+ *
+ * This function acquires ip_alloc_sem and thus it must not be called with a
+ * transaction started.
+ */
+static int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
+ struct buffer_head **bh)
+{
+ int rc = 0;
+ struct buffer_head *tmp = *bh;
+
+ if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
+ ocfs2_error(inode->i_sb,
+ "Quota file %llu is probably corrupted! Requested "
+ "to read block %Lu but file has size only %Lu\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)v_block,
+ (unsigned long long)i_size_read(inode));
+ return -EIO;
+ }
+ rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
+ ocfs2_validate_quota_block);
+ if (rc)
+ mlog_errno(rc);
+
+ /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
+ if (!rc && !*bh)
+ *bh = tmp;
+
+ return rc;
+}
+
/* Check whether we understand format of quota files */
static int ocfs2_local_check_quota_file(struct super_block *sb, int type)
{
@@ -523,9 +553,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(qbh);
- status = ocfs2_journal_dirty(handle, qbh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, qbh);
out_commit:
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
ocfs2_commit_trans(OCFS2_SB(sb), handle);
@@ -631,9 +659,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
lock_buffer(bh);
ldinfo->dqi_flags = cpu_to_le32(flags | OLQF_CLEAN);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, bh);
out_trans:
ocfs2_commit_trans(osb, handle);
out_bh:
@@ -679,7 +705,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
INIT_LIST_HEAD(&oinfo->dqi_chunk);
oinfo->dqi_rec = NULL;
oinfo->dqi_lqi_bh = NULL;
- oinfo->dqi_ibh = NULL;
+ oinfo->dqi_libh = NULL;
status = ocfs2_global_read_info(sb, type);
if (status < 0)
@@ -705,7 +731,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
- oinfo->dqi_ibh = bh;
+ oinfo->dqi_libh = bh;
/* We crashed when using local quota file? */
if (!(info->dqi_flags & OLQF_CLEAN)) {
@@ -767,7 +793,7 @@ static int ocfs2_local_write_info(struct super_block *sb, int type)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct buffer_head *bh = ((struct ocfs2_mem_dqinfo *)info->dqi_priv)
- ->dqi_ibh;
+ ->dqi_libh;
int status;
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info,
@@ -790,10 +816,6 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
int mark_clean = 1, len;
int status;
- /* At this point we know there are no more dquots and thus
- * even if there's some sync in the pdflush queue, it won't
- * find any dquots and return without doing anything */
- cancel_delayed_work_sync(&oinfo->dqi_sync_work);
iput(oinfo->dqi_gqinode);
ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock);
ocfs2_lock_res_free(&oinfo->dqi_gqlock);
@@ -828,7 +850,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
/* Mark local file as clean */
info->dqi_flags |= OLQF_CLEAN;
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
- oinfo->dqi_ibh,
+ oinfo->dqi_libh,
olq_update_info,
info);
if (status < 0) {
@@ -838,7 +860,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
out:
ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1);
- brelse(oinfo->dqi_ibh);
+ brelse(oinfo->dqi_libh);
brelse(oinfo->dqi_lqi_bh);
kfree(oinfo);
return 0;
@@ -866,22 +888,21 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
}
/* Write dquot to local quota file */
-static int ocfs2_local_write_dquot(struct dquot *dquot)
+int ocfs2_local_write_dquot(struct dquot *dquot)
{
struct super_block *sb = dquot->dq_sb;
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
- struct buffer_head *bh = NULL;
+ struct buffer_head *bh;
+ struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type];
int status;
- status = ocfs2_read_quota_block(sb_dqopt(sb)->files[dquot->dq_type],
- ol_dqblk_file_block(sb, od->dq_local_off),
- &bh);
+ status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk,
+ &bh);
if (status) {
mlog_errno(status);
goto out;
}
- status = ocfs2_modify_bh(sb_dqopt(sb)->files[dquot->dq_type], bh,
- olq_set_dquot, od);
+ status = ocfs2_modify_bh(lqinode, bh, olq_set_dquot, od);
if (status < 0) {
mlog_errno(status);
goto out;
@@ -981,10 +1002,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
}
/* Initialize chunk header */
- down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
&p_blkno, NULL, NULL);
- up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
if (status < 0) {
mlog_errno(status);
goto out_trans;
@@ -1009,17 +1028,11 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) -
OCFS2_QBLK_RESERVED_SPACE);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, bh);
/* Initialize new block with structures */
- down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1,
&p_blkno, NULL, NULL);
- up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
if (status < 0) {
mlog_errno(status);
goto out_trans;
@@ -1040,11 +1053,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
lock_buffer(dbh);
memset(dbh->b_data, 0, sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE);
unlock_buffer(dbh);
- status = ocfs2_journal_dirty(handle, dbh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, dbh);
/* Update local quotafile info */
oinfo->dqi_blocks += 2;
@@ -1120,10 +1129,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
}
/* Get buffer from the just added block */
- down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
&p_blkno, NULL, NULL);
- up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
if (status < 0) {
mlog_errno(status);
goto out;
@@ -1155,11 +1162,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
lock_buffer(bh);
memset(bh->b_data, 0, sb->s_blocksize);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, bh);
+
/* Update chunk header */
status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode),
chunk->qc_headerbh,
@@ -1173,11 +1177,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
lock_buffer(chunk->qc_headerbh);
le32_add_cpu(&dchunk->dqc_free, ol_quota_entries_per_block(sb));
unlock_buffer(chunk->qc_headerbh);
- status = ocfs2_journal_dirty(handle, chunk->qc_headerbh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, chunk->qc_headerbh);
+
/* Update file header */
oinfo->dqi_blocks++;
status = ocfs2_local_write_info(sb, type);
@@ -1210,7 +1211,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
}
/* Create dquot in the local file for given id */
-static int ocfs2_create_local_dquot(struct dquot *dquot)
+int ocfs2_create_local_dquot(struct dquot *dquot)
{
struct super_block *sb = dquot->dq_sb;
int type = dquot->dq_type;
@@ -1219,17 +1220,27 @@ static int ocfs2_create_local_dquot(struct dquot *dquot)
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
int offset;
int status;
+ u64 pcount;
+ down_write(&OCFS2_I(lqinode)->ip_alloc_sem);
chunk = ocfs2_find_free_entry(sb, type, &offset);
if (!chunk) {
chunk = ocfs2_extend_local_quota_file(sb, type, &offset);
- if (IS_ERR(chunk))
- return PTR_ERR(chunk);
+ if (IS_ERR(chunk)) {
+ status = PTR_ERR(chunk);
+ goto out;
+ }
} else if (IS_ERR(chunk)) {
- return PTR_ERR(chunk);
+ status = PTR_ERR(chunk);
+ goto out;
}
od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset);
od->dq_chunk = chunk;
+ status = ocfs2_extent_map_get_blocks(lqinode,
+ ol_dqblk_block(sb, chunk->qc_num, offset),
+ &od->dq_local_phys_blk,
+ &pcount,
+ NULL);
/* Initialize dquot structure on disk */
status = ocfs2_local_write_dquot(dquot);
@@ -1246,39 +1257,15 @@ static int ocfs2_create_local_dquot(struct dquot *dquot)
goto out;
}
out:
+ up_write(&OCFS2_I(lqinode)->ip_alloc_sem);
return status;
}
-/* Create entry in local file for dquot, load data from the global file */
-static int ocfs2_local_read_dquot(struct dquot *dquot)
-{
- int status;
-
- mlog_entry("id=%u, type=%d\n", dquot->dq_id, dquot->dq_type);
-
- status = ocfs2_global_read_dquot(dquot);
- if (status < 0) {
- mlog_errno(status);
- goto out_err;
- }
-
- /* Now create entry in the local quota file */
- status = ocfs2_create_local_dquot(dquot);
- if (status < 0) {
- mlog_errno(status);
- goto out_err;
- }
- mlog_exit(0);
- return 0;
-out_err:
- mlog_exit(status);
- return status;
-}
-
-/* Release dquot structure from local quota file. ocfs2_release_dquot() has
- * already started a transaction and obtained exclusive lock for global
- * quota file. */
-static int ocfs2_local_release_dquot(struct dquot *dquot)
+/*
+ * Release dquot structure from the local quota file. ocfs2_release_dquot()
+ * has already started a transaction and written all changes to the global
+ * quota file.
+ */
+int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
{
int status;
int type = dquot->dq_type;
@@ -1286,15 +1273,6 @@ static int ocfs2_local_release_dquot(struct dquot *dquot)
struct super_block *sb = dquot->dq_sb;
struct ocfs2_local_disk_chunk *dchunk;
int offset;
- handle_t *handle = journal_current_handle();
-
- BUG_ON(!handle);
- /* First write all local changes to global file */
- status = ocfs2_global_release_dquot(dquot);
- if (status < 0) {
- mlog_errno(status);
- goto out;
- }
status = ocfs2_journal_access_dq(handle,
INODE_CACHE(sb_dqopt(sb)->files[type]),
@@ -1312,12 +1290,8 @@ static int ocfs2_local_release_dquot(struct dquot *dquot)
ocfs2_clear_bit(offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(od->dq_chunk->qc_headerbh);
- status = ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
- if (status < 0) {
- mlog_errno(status);
- goto out;
- }
- status = 0;
+ ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
+
out:
/* Clear the read bit so that next time someone uses this
* dquot he reads fresh info from disk and allocates local
@@ -1331,9 +1305,6 @@ static const struct quota_format_ops ocfs2_format_ops = {
.read_file_info = ocfs2_local_read_info,
.write_file_info = ocfs2_global_write_info,
.free_file_info = ocfs2_local_free_info,
- .read_dqblk = ocfs2_local_read_dquot,
- .commit_dqblk = ocfs2_local_write_dquot,
- .release_dqblk = ocfs2_local_release_dquot,
};
struct quota_format_type ocfs2_quota_format = {
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index bd96f6c..4793f36 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -570,7 +570,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
u16 suballoc_bit_start;
u32 num_got;
- u64 first_blkno;
+ u64 suballoc_loc, first_blkno;
BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
@@ -596,7 +596,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
goto out_commit;
}
- ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
+ ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&suballoc_bit_start, &num_got,
&first_blkno);
if (ret) {
@@ -626,6 +626,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
memset(rb, 0, inode->i_sb->s_blocksize);
strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
+ rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
rb->rf_blkno = cpu_to_le64(first_blkno);
@@ -790,7 +791,10 @@ int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
if (le32_to_cpu(rb->rf_count) == 1) {
blk = le64_to_cpu(rb->rf_blkno);
bit = le16_to_cpu(rb->rf_suballoc_bit);
- bg_blkno = ocfs2_which_suballoc_group(blk, bit);
+ if (rb->rf_suballoc_loc)
+ bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
+ else
+ bg_blkno = ocfs2_which_suballoc_group(blk, bit);
alloc_inode = ocfs2_get_system_file_inode(osb,
EXTENT_ALLOC_SYSTEM_INODE,
@@ -1268,9 +1272,7 @@ static int ocfs2_change_refcount_rec(handle_t *handle,
} else if (merge)
ocfs2_refcount_rec_merge(rb, index);
- ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
return ret;
}
@@ -1284,7 +1286,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
int ret;
u16 suballoc_bit_start;
u32 num_got;
- u64 blkno;
+ u64 suballoc_loc, blkno;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct buffer_head *new_bh = NULL;
struct ocfs2_refcount_block *new_rb;
@@ -1298,7 +1300,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
goto out;
}
- ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
+ ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&suballoc_bit_start, &num_got,
&blkno);
if (ret) {
@@ -1330,6 +1332,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
+ new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_blkno = cpu_to_le64(blkno);
new_rb->rf_cpos = cpu_to_le32(0);
@@ -1524,7 +1527,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
int ret;
u16 suballoc_bit_start;
u32 num_got, new_cpos;
- u64 blkno;
+ u64 suballoc_loc, blkno;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct ocfs2_refcount_block *root_rb =
(struct ocfs2_refcount_block *)ref_root_bh->b_data;
@@ -1548,7 +1551,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
goto out;
}
- ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
+ ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&suballoc_bit_start, &num_got,
&blkno);
if (ret) {
@@ -1576,6 +1579,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
memset(new_rb, 0, sb->s_blocksize);
strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
+ new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
new_rb->rf_blkno = cpu_to_le64(blkno);
@@ -1694,7 +1698,7 @@ static int ocfs2_adjust_refcount_rec(handle_t *handle,
* 2 more credits, one for the leaf refcount block, one for
* the extent block contains the extent rec.
*/
- ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
+ ret = ocfs2_extend_trans(handle, 2);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -1802,11 +1806,7 @@ static int ocfs2_insert_refcount_rec(handle_t *handle,
if (merge)
ocfs2_refcount_rec_merge(rb, index);
- ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
if (index == 0) {
ret = ocfs2_adjust_refcount_rec(handle, ci,
@@ -1977,9 +1977,7 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
ocfs2_refcount_rec_merge(rb, index);
}
- ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
brelse(new_bh);
@@ -2112,6 +2110,7 @@ static int ocfs2_remove_refcount_extent(handle_t *handle,
*/
ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
le16_to_cpu(rb->rf_suballoc_slot),
+ le64_to_cpu(rb->rf_suballoc_loc),
le64_to_cpu(rb->rf_blkno),
le16_to_cpu(rb->rf_suballoc_bit));
if (ret) {
@@ -2516,20 +2515,19 @@ out:
*
* Normally the refcount blocks store these refcount should be
* contiguous also, so that we can get the number easily.
- * As for meta_ac, we will at most add split 2 refcount record and
- * 2 more refcount block, so just check it in a rough way.
+ * We will at most split 2 refcount records and add 2 more
+ * refcount blocks, so just check it in a rough way.
*
* Caller must hold refcount tree lock.
*/
int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
- struct buffer_head *di_bh,
+ u64 refcount_loc,
u64 phys_blkno,
u32 clusters,
int *credits,
- struct ocfs2_alloc_context **meta_ac)
+ int *ref_blocks)
{
- int ret, ref_blocks = 0;
- struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ int ret;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_tree *tree;
@@ -2546,14 +2544,13 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
- le64_to_cpu(di->i_refcount_loc), &tree);
+ refcount_loc, &tree);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_read_refcount_block(&tree->rf_ci,
- le64_to_cpu(di->i_refcount_loc),
+ ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
&ref_root_bh);
if (ret) {
mlog_errno(ret);
@@ -2564,21 +2561,14 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
&tree->rf_ci,
ref_root_bh,
start_cpos, clusters,
- &ref_blocks, credits);
+ ref_blocks, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
- mlog(0, "reserve new metadata %d, credits = %d\n",
- ref_blocks, *credits);
-
- if (ref_blocks) {
- ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
- ref_blocks, meta_ac);
- if (ret)
- mlog_errno(ret);
- }
+ mlog(0, "reserve new metadata %d blocks, credits = %d\n",
+ *ref_blocks, *credits);
out:
brelse(ref_root_bh);
@@ -3040,11 +3030,7 @@ static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
}
memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
- ret = ocfs2_journal_dirty(handle, new_bh);
- if (ret) {
- mlog_errno(ret);
- break;
- }
+ ocfs2_journal_dirty(handle, new_bh);
brelse(new_bh);
brelse(old_bh);
@@ -3282,7 +3268,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
} else {
delete = 1;
- ret = __ocfs2_claim_clusters(osb, handle,
+ ret = __ocfs2_claim_clusters(handle,
context->data_ac,
1, set_len,
&new_bit, &new_len);
@@ -4083,6 +4069,9 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
di->i_attr = s_di->i_attr;
if (preserve) {
+ t_inode->i_uid = s_inode->i_uid;
+ t_inode->i_gid = s_inode->i_gid;
+ t_inode->i_mode = s_inode->i_mode;
di->i_uid = s_di->i_uid;
di->i_gid = s_di->i_gid;
di->i_mode = s_di->i_mode;
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index c1d19b1..9983ba1 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -47,11 +47,11 @@ int ocfs2_decrease_refcount(struct inode *inode,
struct ocfs2_cached_dealloc_ctxt *dealloc,
int delete);
int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
- struct buffer_head *di_bh,
+ u64 refcount_loc,
u64 phys_blkno,
u32 clusters,
int *credits,
- struct ocfs2_alloc_context **meta_ac);
+ int *ref_blocks);
int ocfs2_refcount_cow(struct inode *inode, struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos);
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
new file mode 100644
index 0000000..4065002
--- /dev/null
+++ b/fs/ocfs2/reservations.c
@@ -0,0 +1,847 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * reservations.c
+ *
+ * Allocation reservations implementation
+ *
+ * Some code borrowed from fs/ext3/balloc.c and is:
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * The rest is copyright (C) 2010 Novell. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+
+#define MLOG_MASK_PREFIX ML_RESERVATIONS
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#ifdef CONFIG_OCFS2_DEBUG_FS
+#define OCFS2_CHECK_RESERVATIONS
+#endif
+
+DEFINE_SPINLOCK(resv_lock);
+
+#define OCFS2_MIN_RESV_WINDOW_BITS 8
+#define OCFS2_MAX_RESV_WINDOW_BITS 1024
+
+int ocfs2_dir_resv_allowed(struct ocfs2_super *osb)
+{
+ return (osb->osb_resv_level && osb->osb_dir_resv_level);
+}
+
+static unsigned int ocfs2_resv_window_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ struct ocfs2_super *osb = resmap->m_osb;
+ unsigned int bits;
+
+ if (!(resv->r_flags & OCFS2_RESV_FLAG_DIR)) {
+ /* 8, 16, 32, 64, 128, 256, 512, 1024 */
+ bits = 4 << osb->osb_resv_level;
+ } else {
+ bits = 4 << osb->osb_dir_resv_level;
+ }
+ return bits;
+}
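
The sizing rule above is a single left shift: the window doubles with each reservation level, producing the 8-1024 bit range listed in the comment and bounded by OCFS2_MIN/MAX_RESV_WINDOW_BITS. A small standalone sketch (userspace, not part of the patch) that tabulates the rule:

	#include <stdio.h>

	/* Illustrative only: levels 1-8 map to windows of 8-1024 bits.
	 * Level 0 disables reservations (see ocfs2_resmap_disabled()). */
	int main(void)
	{
		for (unsigned int level = 1; level <= 8; level++)
			printf("resv level %u -> window of %u bits\n",
			       level, 4u << level);
		return 0;
	}
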
+
+static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv)
+{
+ if (resv->r_len)
+ return resv->r_start + resv->r_len - 1;
+ return resv->r_start;
+}
+
+static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv)
+{
+ return !!(resv->r_len == 0);
+}
+
+static inline int ocfs2_resmap_disabled(struct ocfs2_reservation_map *resmap)
+{
+ if (resmap->m_osb->osb_resv_level == 0)
+ return 1;
+ return 0;
+}
+
+static void ocfs2_dump_resv(struct ocfs2_reservation_map *resmap)
+{
+ struct ocfs2_super *osb = resmap->m_osb;
+ struct rb_node *node;
+ struct ocfs2_alloc_reservation *resv;
+ int i = 0;
+
+ mlog(ML_NOTICE, "Dumping resmap for device %s. Bitmap length: %u\n",
+ osb->dev_str, resmap->m_bitmap_len);
+
+ node = rb_first(&resmap->m_reservations);
+ while (node) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ mlog(ML_NOTICE, "start: %u\tend: %u\tlen: %u\tlast_start: %u"
+ "\tlast_len: %u\n", resv->r_start,
+ ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
+ resv->r_last_len);
+
+ node = rb_next(node);
+ i++;
+ }
+
+ mlog(ML_NOTICE, "%d reservations found. LRU follows\n", i);
+
+ i = 0;
+ list_for_each_entry(resv, &resmap->m_lru, r_lru) {
+ mlog(ML_NOTICE, "LRU(%d) start: %u\tend: %u\tlen: %u\t"
+ "last_start: %u\tlast_len: %u\n", i, resv->r_start,
+ ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
+ resv->r_last_len);
+
+ i++;
+ }
+}
+
+#ifdef OCFS2_CHECK_RESERVATIONS
+static int ocfs2_validate_resmap_bits(struct ocfs2_reservation_map *resmap,
+ int i,
+ struct ocfs2_alloc_reservation *resv)
+{
+ char *disk_bitmap = resmap->m_disk_bitmap;
+ unsigned int start = resv->r_start;
+ unsigned int end = ocfs2_resv_end(resv);
+
+ while (start <= end) {
+ if (ocfs2_test_bit(start, disk_bitmap)) {
+ mlog(ML_ERROR,
+ "reservation %d covers an allocated area "
+ "starting at bit %u!\n", i, start);
+ return 1;
+ }
+
+ start++;
+ }
+ return 0;
+}
+
+static void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
+{
+ unsigned int off = 0;
+ int i = 0;
+ struct rb_node *node;
+ struct ocfs2_alloc_reservation *resv;
+
+ node = rb_first(&resmap->m_reservations);
+ while (node) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ if (i > 0 && resv->r_start <= off) {
+ mlog(ML_ERROR, "reservation %d has bad start off!\n",
+ i);
+ goto bad;
+ }
+
+ if (resv->r_len == 0) {
+ mlog(ML_ERROR, "reservation %d has no length!\n",
+ i);
+ goto bad;
+ }
+
+ if (resv->r_start > ocfs2_resv_end(resv)) {
+ mlog(ML_ERROR, "reservation %d has invalid range!\n",
+ i);
+ goto bad;
+ }
+
+ if (ocfs2_resv_end(resv) >= resmap->m_bitmap_len) {
+ mlog(ML_ERROR, "reservation %d extends past bitmap!\n",
+ i);
+ goto bad;
+ }
+
+ if (ocfs2_validate_resmap_bits(resmap, i, resv))
+ goto bad;
+
+ off = ocfs2_resv_end(resv);
+ node = rb_next(node);
+
+ i++;
+ }
+ return;
+
+bad:
+ ocfs2_dump_resv(resmap);
+ BUG();
+}
+#else
+static inline void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
+{
+
+}
+#endif
+
+void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv)
+{
+ memset(resv, 0, sizeof(*resv));
+ INIT_LIST_HEAD(&resv->r_lru);
+}
+
+void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
+ unsigned int flags)
+{
+ BUG_ON(flags & ~OCFS2_RESV_TYPES);
+
+ resv->r_flags |= flags;
+}
+
+int ocfs2_resmap_init(struct ocfs2_super *osb,
+ struct ocfs2_reservation_map *resmap)
+{
+ memset(resmap, 0, sizeof(*resmap));
+
+ resmap->m_osb = osb;
+ resmap->m_reservations = RB_ROOT;
+ /* m_bitmap_len is initialized to zero by the above memset. */
+ INIT_LIST_HEAD(&resmap->m_lru);
+
+ return 0;
+}
+
+static void ocfs2_resv_mark_lru(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ assert_spin_locked(&resv_lock);
+
+ if (!list_empty(&resv->r_lru))
+ list_del_init(&resv->r_lru);
+
+ list_add_tail(&resv->r_lru, &resmap->m_lru);
+}
+
+static void __ocfs2_resv_trunc(struct ocfs2_alloc_reservation *resv)
+{
+ resv->r_len = 0;
+ resv->r_start = 0;
+}
+
+static void ocfs2_resv_remove(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ if (resv->r_flags & OCFS2_RESV_FLAG_INUSE) {
+ list_del_init(&resv->r_lru);
+ rb_erase(&resv->r_node, &resmap->m_reservations);
+ resv->r_flags &= ~OCFS2_RESV_FLAG_INUSE;
+ }
+}
+
+static void __ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ assert_spin_locked(&resv_lock);
+
+ __ocfs2_resv_trunc(resv);
+ /*
+ * last_len and last_start no longer make sense if
+ * we're changing the range of our allocations.
+ */
+ resv->r_last_len = resv->r_last_start = 0;
+
+ ocfs2_resv_remove(resmap, resv);
+}
+
+/* does nothing if 'resv' is null */
+void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ if (resv) {
+ spin_lock(&resv_lock);
+ __ocfs2_resv_discard(resmap, resv);
+ spin_unlock(&resv_lock);
+ }
+}
+
+static void ocfs2_resmap_clear_all_resv(struct ocfs2_reservation_map *resmap)
+{
+ struct rb_node *node;
+ struct ocfs2_alloc_reservation *resv;
+
+ assert_spin_locked(&resv_lock);
+
+ while ((node = rb_last(&resmap->m_reservations)) != NULL) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ __ocfs2_resv_discard(resmap, resv);
+ }
+}
+
+void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap,
+ unsigned int clen, char *disk_bitmap)
+{
+ if (ocfs2_resmap_disabled(resmap))
+ return;
+
+ spin_lock(&resv_lock);
+
+ ocfs2_resmap_clear_all_resv(resmap);
+ resmap->m_bitmap_len = clen;
+ resmap->m_disk_bitmap = disk_bitmap;
+
+ spin_unlock(&resv_lock);
+}
+
+void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap)
+{
+ /* Does nothing for now. Keep this around for API symmetry */
+}
+
+static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *new)
+{
+ struct rb_root *root = &resmap->m_reservations;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &root->rb_node;
+ struct ocfs2_alloc_reservation *tmp;
+
+ assert_spin_locked(&resv_lock);
+
+ mlog(0, "Insert reservation start: %u len: %u\n", new->r_start,
+ new->r_len);
+
+ while (*p) {
+ parent = *p;
+
+ tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node);
+
+ if (new->r_start < tmp->r_start) {
+ p = &(*p)->rb_left;
+
+ /*
+ * This is a good place to check for
+ * overlapping reservations.
+ */
+ BUG_ON(ocfs2_resv_end(new) >= tmp->r_start);
+ } else if (new->r_start > ocfs2_resv_end(tmp)) {
+ p = &(*p)->rb_right;
+ } else {
+ /* This should never happen! */
+ mlog(ML_ERROR, "Duplicate reservation window!\n");
+ BUG();
+ }
+ }
+
+ rb_link_node(&new->r_node, parent, p);
+ rb_insert_color(&new->r_node, root);
+ new->r_flags |= OCFS2_RESV_FLAG_INUSE;
+
+ ocfs2_resv_mark_lru(resmap, new);
+
+ ocfs2_check_resmap(resmap);
+}
+
+/**
+ * ocfs2_find_resv_lhs() - find the window which contains goal
+ * @resmap: reservation map to search
+ * @goal: which bit to search for
+ *
+ * If a window containing that goal is not found, we return the window
+ * which comes before goal. Returns NULL on empty rbtree or no window
+ * before goal.
+ */
+static struct ocfs2_alloc_reservation *
+ocfs2_find_resv_lhs(struct ocfs2_reservation_map *resmap, unsigned int goal)
+{
+ struct ocfs2_alloc_reservation *resv = NULL;
+ struct ocfs2_alloc_reservation *prev_resv = NULL;
+ struct rb_node *node = resmap->m_reservations.rb_node;
+
+ assert_spin_locked(&resv_lock);
+
+ if (!node)
+ return NULL;
+
+ node = rb_first(&resmap->m_reservations);
+ while (node) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ if (resv->r_start <= goal && ocfs2_resv_end(resv) >= goal)
+ break;
+
+		/* Did we overshoot the reservation just before goal? */
+ if (resv->r_start > goal) {
+ resv = prev_resv;
+ break;
+ }
+
+ prev_resv = resv;
+ node = rb_next(node);
+ }
+
+ return resv;
+}
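
Because the tree is keyed by r_start and windows never overlap, the walk above translates directly to any sorted container. A hedged userspace sketch of the same "containing window, else nearest window on the left" rule over a sorted array (struct win and find_lhs are illustrative names, not part of the patch):

	#include <stddef.h>

	struct win {
		unsigned int start;	/* first bit of the window */
		unsigned int len;	/* length in bits, always > 0 */
	};

	/* Return the window containing 'goal', else the nearest window
	 * before it, else NULL. 'wins' is sorted by start and
	 * non-overlapping, mirroring the rbtree invariant. */
	static struct win *find_lhs(struct win *wins, size_t n,
				    unsigned int goal)
	{
		struct win *prev = NULL;

		for (size_t i = 0; i < n; i++) {
			unsigned int end = wins[i].start + wins[i].len - 1;

			if (wins[i].start <= goal && end >= goal)
				return &wins[i];  /* contains goal */
			if (wins[i].start > goal)
				return prev;	  /* overshot goal */
			prev = &wins[i];
		}
		return prev;	/* goal is right of every window */
	}
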
+
+/*
+ * We are given a range within the bitmap, which corresponds to a gap
+ * inside the reservations tree (search_start, search_len). The range
+ * can be anything from the whole bitmap to a gap between
+ * reservations.
+ *
+ * The start value of *rstart is insignificant.
+ *
+ * This function searches the bitmap range starting at search_start
+ * with length search_len for a set of contiguous free bits. We try
+ * to find up to 'wanted' bits, but can sometimes return less.
+ *
+ * Returns the length of allocation, 0 if no free bits are found.
+ *
+ * *rstart and *rlen will also be populated with the result.
+ */
+static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
+ unsigned int wanted,
+ unsigned int search_start,
+ unsigned int search_len,
+ unsigned int *rstart,
+ unsigned int *rlen)
+{
+ void *bitmap = resmap->m_disk_bitmap;
+ unsigned int best_start, best_len = 0;
+ int offset, start, found;
+
+ mlog(0, "Find %u bits within range (%u, len %u) resmap len: %u\n",
+ wanted, search_start, search_len, resmap->m_bitmap_len);
+
+ found = best_start = best_len = 0;
+
+ start = search_start;
+ while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
+ start)) != -1) {
+ /* Search reached end of the region */
+ if (offset >= (search_start + search_len))
+ break;
+
+ if (offset == start) {
+ /* we found a zero */
+ found++;
+ /* move start to the next bit to test */
+ start++;
+ } else {
+ /* got a zero after some ones */
+ found = 1;
+ start = offset + 1;
+ }
+ if (found > best_len) {
+ best_len = found;
+ best_start = start - found;
+ }
+
+ if (found >= wanted)
+ break;
+ }
+
+ if (best_len == 0)
+ return 0;
+
+ if (best_len >= wanted)
+ best_len = wanted;
+
+ *rlen = best_len;
+ *rstart = best_start;
+
+ mlog(0, "Found start: %u len: %u\n", best_start, best_len);
+
+ return *rlen;
+}
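
The loop above is a bounded "best run of zero bits" scan: track the current run, remember the longest, and stop early once 'wanted' bits are found. A self-contained sketch of the same logic, storing one bit per char so it runs outside the kernel (find_free_run is an illustrative name; the kernel walks a real bitmap via ocfs2_find_next_zero_bit()):

	/* Returns the run length found (capped at 'wanted'), or 0 if the
	 * region [search_start, search_start + search_len) has no free
	 * bits. Assumes the caller keeps the region inside the bitmap. */
	static unsigned int find_free_run(const char *bitmap,
					  unsigned int search_start,
					  unsigned int search_len,
					  unsigned int wanted,
					  unsigned int *rstart)
	{
		unsigned int best_start = 0, best_len = 0, found = 0;

		for (unsigned int i = search_start;
		     i < search_start + search_len; i++) {
			if (bitmap[i]) {	/* bit set: run broken */
				found = 0;
				continue;
			}
			found++;
			if (found > best_len) {
				best_len = found;
				best_start = i - found + 1;
			}
			if (found >= wanted)
				break;		/* good enough, stop */
		}

		if (best_len > wanted)
			best_len = wanted;
		if (best_len)
			*rstart = best_start;
		return best_len;
	}
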
+
+static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int goal, unsigned int wanted)
+{
+ struct rb_root *root = &resmap->m_reservations;
+ unsigned int gap_start, gap_end, gap_len;
+ struct ocfs2_alloc_reservation *prev_resv, *next_resv;
+ struct rb_node *prev, *next;
+ unsigned int cstart, clen;
+ unsigned int best_start = 0, best_len = 0;
+
+ /*
+ * Nasty cases to consider:
+ *
+ * - rbtree is empty
+ * - our window should be first in all reservations
+ * - our window should be last in all reservations
+ * - need to make sure we don't go past end of bitmap
+ */
+
+ mlog(0, "resv start: %u resv end: %u goal: %u wanted: %u\n",
+ resv->r_start, ocfs2_resv_end(resv), goal, wanted);
+
+ assert_spin_locked(&resv_lock);
+
+ if (RB_EMPTY_ROOT(root)) {
+ /*
+ * Easiest case - empty tree. We can just take
+ * whatever window of free bits we want.
+ */
+
+ mlog(0, "Empty root\n");
+
+ clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
+ resmap->m_bitmap_len - goal,
+ &cstart, &clen);
+
+ /*
+ * This should never happen - the local alloc window
+ * will always have free bits when we're called.
+ */
+ BUG_ON(goal == 0 && clen == 0);
+
+ if (clen == 0)
+ return;
+
+ resv->r_start = cstart;
+ resv->r_len = clen;
+
+ ocfs2_resv_insert(resmap, resv);
+ return;
+ }
+
+ prev_resv = ocfs2_find_resv_lhs(resmap, goal);
+
+ if (prev_resv == NULL) {
+ mlog(0, "Goal on LHS of leftmost window\n");
+
+ /*
+ * A NULL here means that the search code couldn't
+ * find a window that starts before goal.
+ *
+ * However, we can take the first window after goal,
+		 * which is also, by definition, the leftmost window in
+ * the entire tree. If we can find free bits in the
+ * gap between goal and the LHS window, then the
+ * reservation can safely be placed there.
+ *
+ * Otherwise we fall back to a linear search, checking
+ * the gaps in between windows for a place to
+ * allocate.
+ */
+
+ next = rb_first(root);
+ next_resv = rb_entry(next, struct ocfs2_alloc_reservation,
+ r_node);
+
+ /*
+		 * The search should never return such a window
+		 * (see the comment above).
+ */
+ if (next_resv->r_start <= goal) {
+ mlog(ML_ERROR, "goal: %u next_resv: start %u len %u\n",
+ goal, next_resv->r_start, next_resv->r_len);
+ ocfs2_dump_resv(resmap);
+ BUG();
+ }
+
+ clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
+ next_resv->r_start - goal,
+ &cstart, &clen);
+ if (clen) {
+ best_len = clen;
+ best_start = cstart;
+ if (best_len == wanted)
+ goto out_insert;
+ }
+
+ prev_resv = next_resv;
+ next_resv = NULL;
+ }
+
+ prev = &prev_resv->r_node;
+
+ /* Now we do a linear search for a window, starting at 'prev_rsv' */
+ while (1) {
+ next = rb_next(prev);
+ if (next) {
+ mlog(0, "One more resv found in linear search\n");
+ next_resv = rb_entry(next,
+ struct ocfs2_alloc_reservation,
+ r_node);
+
+ gap_start = ocfs2_resv_end(prev_resv) + 1;
+ gap_end = next_resv->r_start - 1;
+ gap_len = gap_end - gap_start + 1;
+ } else {
+ mlog(0, "No next node\n");
+ /*
+ * We're at the rightmost edge of the
+ * tree. See if a reservation between this
+ * window and the end of the bitmap will work.
+ */
+ gap_start = ocfs2_resv_end(prev_resv) + 1;
+ gap_len = resmap->m_bitmap_len - gap_start;
+ gap_end = resmap->m_bitmap_len - 1;
+ }
+
+ /*
+ * No need to check this gap if we have already found
+ * a larger region of free bits.
+ */
+ if (gap_len <= best_len)
+ goto next_resv;
+
+ clen = ocfs2_resmap_find_free_bits(resmap, wanted, gap_start,
+ gap_len, &cstart, &clen);
+ if (clen == wanted) {
+ best_len = clen;
+ best_start = cstart;
+ goto out_insert;
+ } else if (clen > best_len) {
+ best_len = clen;
+ best_start = cstart;
+ }
+
+next_resv:
+ if (!next)
+ break;
+
+ prev = next;
+ prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation,
+ r_node);
+ }
+
+out_insert:
+ if (best_len) {
+ resv->r_start = best_start;
+ resv->r_len = best_len;
+ ocfs2_resv_insert(resmap, resv);
+ }
+}
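
Stripped of the rbtree plumbing, the linear phase above enumerates the free gap after each window, with the final gap running to the end of the bitmap, and keeps the best candidate found so far. A hedged sketch of just that gap enumeration over a sorted array (for_each_gap is an illustrative helper, not kernel API):

	#include <stddef.h>

	struct win { unsigned int start, len; };  /* sorted, no overlap */
	struct gap { unsigned int start, len; };

	/* Visit the gap following each window; the last gap extends to
	 * bitmap_len. Zero-length gaps (adjacent windows) are skipped. */
	static void for_each_gap(const struct win *wins, size_t n,
				 unsigned int bitmap_len,
				 void (*visit)(struct gap g, void *arg),
				 void *arg)
	{
		for (size_t i = 0; i < n; i++) {
			unsigned int gs = wins[i].start + wins[i].len;
			unsigned int ge = (i + 1 < n) ?
				wins[i + 1].start - 1 : bitmap_len - 1;

			if (ge >= gs)
				visit((struct gap){ gs, ge - gs + 1 }, arg);
		}
	}
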
+
+static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int wanted)
+{
+ struct ocfs2_alloc_reservation *lru_resv;
+ int tmpwindow = !!(resv->r_flags & OCFS2_RESV_FLAG_TMP);
+ unsigned int min_bits;
+
+ if (!tmpwindow)
+ min_bits = ocfs2_resv_window_bits(resmap, resv) >> 1;
+ else
+		min_bits = wanted; /* We know the temp window will use
+				    * all of these bits */
+
+ /*
+ * Take the first reservation off the LRU as our 'target'. We
+ * don't try to be smart about it. There might be a case for
+ * searching based on size but I don't have enough data to be
+ * sure. --Mark (3/16/2010)
+ */
+ lru_resv = list_first_entry(&resmap->m_lru,
+ struct ocfs2_alloc_reservation, r_lru);
+
+ mlog(0, "lru resv: start: %u len: %u end: %u\n", lru_resv->r_start,
+ lru_resv->r_len, ocfs2_resv_end(lru_resv));
+
+ /*
+ * Cannibalize (some or all) of the target reservation and
+ * feed it to the current window.
+ */
+ if (lru_resv->r_len <= min_bits) {
+ /*
+ * Discard completely if size is less than or equal to a
+		 * reasonable threshold - 50% of window bits for non-temporary
+ * windows.
+ */
+ resv->r_start = lru_resv->r_start;
+ resv->r_len = lru_resv->r_len;
+
+ __ocfs2_resv_discard(resmap, lru_resv);
+ } else {
+ unsigned int shrink;
+ if (tmpwindow)
+ shrink = min_bits;
+ else
+ shrink = lru_resv->r_len / 2;
+
+ lru_resv->r_len -= shrink;
+
+ resv->r_start = ocfs2_resv_end(lru_resv) + 1;
+ resv->r_len = shrink;
+ }
+
+ mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
+ "r_len: %u r_last_start: %u r_last_len: %u\n",
+ resv->r_start, ocfs2_resv_end(resv), resv->r_len,
+ resv->r_last_start, resv->r_last_len);
+
+ ocfs2_resv_insert(resmap, resv);
+}
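
The cannibalization above reduces to two arithmetic cases: steal the LRU window outright when it is at or below the threshold, otherwise halve it and take the freed tail as the new window. A minimal sketch of that arithmetic (illustrative names; the temp-window variant and all LRU/rbtree bookkeeping are omitted):

	struct window { unsigned int start, len; };

	static void cannibalize(struct window *lru, struct window *resv,
				unsigned int min_bits)
	{
		if (lru->len <= min_bits) {
			*resv = *lru;	/* take the whole window... */
			lru->len = 0;	/* ...and discard the old one */
		} else {
			unsigned int shrink = lru->len / 2;

			lru->len -= shrink;
			/* first bit past the shrunken window */
			resv->start = lru->start + lru->len;
			resv->len = shrink;
		}
	}
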
+
+static void ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int wanted)
+{
+ unsigned int goal = 0;
+
+ BUG_ON(!ocfs2_resv_empty(resv));
+
+ /*
+ * Begin by trying to get a window as close to the previous
+ * one as possible. Using the most recent allocation as a
+ * start goal makes sense.
+ */
+ if (resv->r_last_len) {
+ goal = resv->r_last_start + resv->r_last_len;
+ if (goal >= resmap->m_bitmap_len)
+ goal = 0;
+ }
+
+ __ocfs2_resv_find_window(resmap, resv, goal, wanted);
+
+ /* Search from last alloc didn't work, try once more from beginning. */
+ if (ocfs2_resv_empty(resv) && goal != 0)
+ __ocfs2_resv_find_window(resmap, resv, 0, wanted);
+
+ if (ocfs2_resv_empty(resv)) {
+ /*
+		 * Still empty? Pull the oldest one off the LRU, remove
+		 * it from the tree, and put this one in its place.
+ */
+ ocfs2_cannibalize_resv(resmap, resv, wanted);
+ }
+
+ BUG_ON(ocfs2_resv_empty(resv));
+}
+
+int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ int *cstart, int *clen)
+{
+ unsigned int wanted = *clen;
+
+ if (resv == NULL || ocfs2_resmap_disabled(resmap))
+ return -ENOSPC;
+
+ spin_lock(&resv_lock);
+
+ /*
+ * We don't want to over-allocate for temporary
+ * windows. Otherwise, we run the risk of fragmenting the
+ * allocation space.
+ */
+ wanted = ocfs2_resv_window_bits(resmap, resv);
+ if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
+ wanted = *clen;
+
+ if (ocfs2_resv_empty(resv)) {
+ mlog(0, "empty reservation, find new window\n");
+
+ /*
+ * Try to get a window here. If it works, we must fall
+		 * through and test the bitmap. This avoids some
+		 * ping-ponging of windows due to non-reserved space
+		 * being allocated before we initialize a window for
+ * that inode.
+ */
+ ocfs2_resv_find_window(resmap, resv, wanted);
+ }
+
+ BUG_ON(ocfs2_resv_empty(resv));
+
+ *cstart = resv->r_start;
+ *clen = resv->r_len;
+
+ spin_unlock(&resv_lock);
+ return 0;
+}
+
+static void
+ocfs2_adjust_resv_from_alloc(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int start, unsigned int end)
+{
+ unsigned int rhs = 0;
+ unsigned int old_end = ocfs2_resv_end(resv);
+
+ BUG_ON(start != resv->r_start || old_end < end);
+
+ /*
+ * Completely used? We can remove it then.
+ */
+ if (old_end == end) {
+ __ocfs2_resv_discard(resmap, resv);
+ return;
+ }
+
+ rhs = old_end - end;
+
+ /*
+ * This should have been trapped above.
+ */
+ BUG_ON(rhs == 0);
+
+ resv->r_start = end + 1;
+ resv->r_len = old_end - resv->r_start + 1;
+}
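
Because allocations handed out through a window always begin at r_start, the adjustment above only ever trims from the left: the window is either consumed whole or keeps the remainder to the right of the allocated run. The same arithmetic as a standalone sketch (illustrative names):

	struct window { unsigned int start, len; };

	/* 'alloc_end' is the last bit of an allocation that started at
	 * w->start; mirrors ocfs2_adjust_resv_from_alloc(). */
	static void trim_after_alloc(struct window *w, unsigned int alloc_end)
	{
		unsigned int old_end = w->start + w->len - 1;

		if (alloc_end >= old_end) {
			w->len = 0;		/* fully consumed */
			return;
		}
		w->start = alloc_end + 1;	/* keep the RHS remainder */
		w->len = old_end - w->start + 1;
	}
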
+
+void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ u32 cstart, u32 clen)
+{
+ unsigned int cend = cstart + clen - 1;
+
+ if (resmap == NULL || ocfs2_resmap_disabled(resmap))
+ return;
+
+ if (resv == NULL)
+ return;
+
+ BUG_ON(cstart != resv->r_start);
+
+ spin_lock(&resv_lock);
+
+ mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u "
+ "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n",
+ cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv),
+ resv->r_len, resv->r_last_start, resv->r_last_len);
+
+ BUG_ON(cstart < resv->r_start);
+ BUG_ON(cstart > ocfs2_resv_end(resv));
+ BUG_ON(cend > ocfs2_resv_end(resv));
+
+ ocfs2_adjust_resv_from_alloc(resmap, resv, cstart, cend);
+ resv->r_last_start = cstart;
+ resv->r_last_len = clen;
+
+ /*
+ * May have been discarded above from
+ * ocfs2_adjust_resv_from_alloc().
+ */
+ if (!ocfs2_resv_empty(resv))
+ ocfs2_resv_mark_lru(resmap, resv);
+
+ mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
+ "r_len: %u r_last_start: %u r_last_len: %u\n",
+ resv->r_start, ocfs2_resv_end(resv), resv->r_len,
+ resv->r_last_start, resv->r_last_len);
+
+ ocfs2_check_resmap(resmap);
+
+ spin_unlock(&resv_lock);
+}
diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h
new file mode 100644
index 0000000..1e49cc2
--- /dev/null
+++ b/fs/ocfs2/reservations.h
@@ -0,0 +1,159 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * reservations.h
+ *
+ * Allocation reservations function prototypes and structures.
+ *
+ * Copyright (C) 2010 Novell. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_RESERVATIONS_H
+#define OCFS2_RESERVATIONS_H
+
+#include <linux/rbtree.h>
+
+#define OCFS2_DEFAULT_RESV_LEVEL 2
+#define OCFS2_MAX_RESV_LEVEL 9
+#define OCFS2_MIN_RESV_LEVEL 0
+
+struct ocfs2_alloc_reservation {
+ struct rb_node r_node;
+
+	unsigned int	r_start;	/* Beginning of current window */
+ unsigned int r_len; /* Length of the window */
+
+ unsigned int r_last_len; /* Length of most recent alloc */
+ unsigned int r_last_start; /* Start of most recent alloc */
+ struct list_head r_lru; /* LRU list head */
+
+ unsigned int r_flags;
+};
+
+#define	OCFS2_RESV_FLAG_INUSE	0x01	/* Set when r_node is part of an
+					 * rbtree */
+#define	OCFS2_RESV_FLAG_TMP	0x02	/* Temporary reservation, will be
+					 * destroyed immediately after use */
+#define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed
+ * directory btree */
+
+struct ocfs2_reservation_map {
+ struct rb_root m_reservations;
+ char *m_disk_bitmap;
+
+ struct ocfs2_super *m_osb;
+
+ /* The following are not initialized to meaningful values until a disk
+ * bitmap is provided. */
+ u32 m_bitmap_len; /* Number of valid
+ * bits available */
+
+ struct list_head m_lru; /* LRU of reservations
+ * structures. */
+
+};
+
+void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv);
+
+#define OCFS2_RESV_TYPES (OCFS2_RESV_FLAG_TMP|OCFS2_RESV_FLAG_DIR)
+void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
+ unsigned int flags);
+
+int ocfs2_dir_resv_allowed(struct ocfs2_super *osb);
+
+/**
+ * ocfs2_resv_discard() - truncate a reservation
+ * @resmap:
+ * @resv: the reservation to truncate.
+ *
+ * After this function is called, the reservation will be empty, and
+ * unlinked from the rbtree.
+ */
+void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv);
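
Taken together with ocfs2_resv_set_type() above, a reservation's lifetime is: zero it once, optionally tag it, let allocations use it, then discard it when its owner goes away. A hedged sketch of that sequence (resv_lifecycle_example is illustrative; a real caller embeds the reservation in a longer-lived object such as an inode):

	static void resv_lifecycle_example(struct ocfs2_reservation_map *resmap)
	{
		struct ocfs2_alloc_reservation resv;

		ocfs2_resv_init_once(&resv);	/* zero + init LRU link */
		ocfs2_resv_set_type(&resv, OCFS2_RESV_FLAG_DIR);
		/* ... ocfs2_resmap_resv_bits()/_claimed_bits() here ... */
		ocfs2_resv_discard(resmap, &resv);  /* empty and unlink */
	}
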
+
+
+/**
+ * ocfs2_resmap_init() - Initialize fields of a reservations bitmap
+ * @osb: the struct ocfs2_super the reservations are for
+ * @resmap: struct ocfs2_reservation_map to initialize
+ *
+ * The only return value other than '0' would be -ENOMEM, for failure to
+ * allocate the mirror bitmap.
+ */
+int ocfs2_resmap_init(struct ocfs2_super *osb,
+ struct ocfs2_reservation_map *resmap);
+
+/**
+ * ocfs2_resmap_restart() - "restart" a reservation bitmap
+ * @resmap: reservations bitmap
+ * @clen: Number of valid bits in the bitmap
+ * @disk_bitmap: the disk bitmap this resmap should refer to.
+ *
+ * Re-initialize the parameters of a reservation bitmap. This is
+ * useful for local alloc window slides.
+ *
+ * This function discards all existing reservations. A future version
+ * will recalculate existing reservations based on the new bitmap.
+ */
+void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap,
+ unsigned int clen, char *disk_bitmap);
+
+/**
+ * ocfs2_resmap_uninit() - uninitialize a reservation bitmap structure
+ * @resmap: the struct ocfs2_reservation_map to uninitialize
+ */
+void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap);
+
+/**
+ * ocfs2_resmap_resv_bits() - Return still-valid reservation bits
+ * @resmap: reservations bitmap
+ * @resv: reservation to base search from
+ * @cstart: start of proposed allocation
+ * @clen: length (in clusters) of proposed allocation
+ *
+ * Using the reservation data from resv, this function will compare
+ * resmap and resmap->m_disk_bitmap to determine what part (if any) of
+ * the reservation window is still clear to use. If resv is empty,
+ * this function will try to allocate a window for it.
+ *
+ * On success, zero is returned and the valid allocation area is set in cstart
+ * and clen.
+ *
+ * Returns -ENOSPC if reservations are disabled.
+ */
+int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ int *cstart, int *clen);
+
+/**
+ * ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used.
+ * @resmap: reservations bitmap
+ * @resv: optional reservation to recalculate based on the new bitmap
+ * @cstart: start of the allocation, in clusters
+ * @clen: length of the allocation, in clusters
+ *
+ * Tell the reservation code that bits were used to fulfill allocation in
+ * resmap. The bits don't have to have been part of any existing
+ * reservation. But we must always call this function when bits are claimed.
+ * Internally, the reservations code will use this information to mark the
+ * reservations bitmap. If resv is passed, its next allocation window will be
+ * calculated. It also expects that 'cstart' is the same value we passed back
+ * from ocfs2_resmap_resv_bits().
+ */
+void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ u32 cstart, u32 clen);
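
The two calls documented above form a reserve/claim protocol: ask for a window, allocate from the returned range in the disk bitmap, then report what was actually used so the window can be trimmed. A hedged sketch of that sequence (alloc_with_resv and the bitmap-update step are illustrative; the real callers live in the suballocator and local alloc paths):

	static int alloc_with_resv(struct ocfs2_reservation_map *resmap,
				   struct ocfs2_alloc_reservation *resv,
				   int wanted, int *bit_off, int *num_bits)
	{
		int cstart, clen = wanted;

		if (ocfs2_resmap_resv_bits(resmap, resv, &cstart, &clen))
			return -ENOSPC;	/* disabled: raw bitmap search */

		/* ... set bits [cstart, cstart + clen) on disk ... */
		ocfs2_resmap_claimed_bits(resmap, resv, cstart, clen);

		*bit_off = cstart;
		*num_bits = clen;
		return 0;
	}
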
+
+#endif /* OCFS2_RESERVATIONS_H */
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index 3c3d673..dacd553 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -134,11 +134,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
}
- ret = ocfs2_journal_dirty(handle, group_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out_rollback;
- }
+ ocfs2_journal_dirty(handle, group_bh);
/* update the inode accordingly. */
ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
@@ -319,7 +315,8 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
- ocfs2_group_bitmap_size(osb->sb) * 8) {
+ ocfs2_group_bitmap_size(osb->sb, 0,
+ osb->s_feature_incompat) * 8) {
mlog(ML_ERROR, "The disk is too old and small. "
"Force to do offline resize.");
ret = -EINVAL;
@@ -500,7 +497,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
fe = (struct ocfs2_dinode *)main_bm_bh->b_data;
if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
- ocfs2_group_bitmap_size(osb->sb) * 8) {
+ ocfs2_group_bitmap_size(osb->sb, 0,
+ osb->s_feature_incompat) * 8) {
mlog(ML_ERROR, "The disk is too old and small."
" Force to do offline resize.");
ret = -EINVAL;
@@ -545,12 +543,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
group = (struct ocfs2_group_desc *)group_bh->b_data;
group->bg_next_group = cr->c_blkno;
-
- ret = ocfs2_journal_dirty(handle, group_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, group_bh);
ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 19ba00f..f4c2a9e 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -53,6 +53,15 @@
#define OCFS2_MAX_TO_STEAL 1024
+struct ocfs2_suballoc_result {
+ u64 sr_bg_blkno; /* The bg we allocated from. Set
+ to 0 when a block group is
+ contiguous. */
+ u64 sr_blkno; /* The first allocated block */
+ unsigned int sr_bit_offset; /* The bit in the bg */
+ unsigned int sr_bits; /* How many bits we claimed */
+};
+
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -60,6 +69,7 @@ static int ocfs2_block_group_fill(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
u64 group_blkno,
+ unsigned int group_clusters,
u16 my_chain,
struct ocfs2_chain_list *cl);
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
@@ -73,20 +83,17 @@ static int ocfs2_cluster_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
- u16 *bit_off, u16 *bits_found);
+ struct ocfs2_suballoc_result *res);
static int ocfs2_block_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
- u16 *bit_off, u16 *bits_found);
-static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
- struct ocfs2_alloc_context *ac,
+ struct ocfs2_suballoc_result *res);
+static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
- u16 *bit_off,
- unsigned int *num_bits,
- u64 *bg_blkno);
+ struct ocfs2_suballoc_result *res);
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
int nr);
static inline int ocfs2_block_group_set_bits(handle_t *handle,
@@ -130,6 +137,7 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
}
brelse(ac->ac_bh);
ac->ac_bh = NULL;
+ ac->ac_resv = NULL;
}
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -325,14 +333,38 @@ out:
return rc;
}
+static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
+ struct ocfs2_group_desc *bg,
+ struct ocfs2_chain_list *cl,
+ u64 p_blkno, u32 clusters)
+{
+ struct ocfs2_extent_list *el = &bg->bg_list;
+ struct ocfs2_extent_rec *rec;
+
+ BUG_ON(!ocfs2_supports_discontig_bg(osb));
+ if (!el->l_next_free_rec)
+ el->l_count = cpu_to_le16(ocfs2_extent_recs_per_gd(osb->sb));
+ rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec)];
+ rec->e_blkno = cpu_to_le64(p_blkno);
+ rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
+ le16_to_cpu(cl->cl_bpc));
+ rec->e_leaf_clusters = cpu_to_le32(clusters);
+ le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
+ le16_add_cpu(&bg->bg_free_bits_count,
+ clusters * le16_to_cpu(cl->cl_bpc));
+ le16_add_cpu(&el->l_next_free_rec, 1);
+}
+
static int ocfs2_block_group_fill(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
u64 group_blkno,
+ unsigned int group_clusters,
u16 my_chain,
struct ocfs2_chain_list *cl)
{
int status = 0;
+ struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct super_block * sb = alloc_inode->i_sb;
@@ -359,19 +391,23 @@ static int ocfs2_block_group_fill(handle_t *handle,
memset(bg, 0, sb->s_blocksize);
strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
- bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
- bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
+ bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1,
+ osb->s_feature_incompat));
bg->bg_chain = cpu_to_le16(my_chain);
bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
bg->bg_blkno = cpu_to_le64(group_blkno);
+ if (group_clusters == le16_to_cpu(cl->cl_cpg))
+ bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
+ else
+ ocfs2_bg_discontig_add_extent(osb, bg, cl, group_blkno,
+ group_clusters);
+
/* set the 1st bit in the bitmap to account for the descriptor block */
ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
- status = ocfs2_journal_dirty(handle, bg_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, bg_bh);
/* There is no need to zero out or otherwise initialize the
* other blocks in a group - All valid FS metadata in a block
@@ -397,6 +433,238 @@ static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
return best;
}
+static struct buffer_head *
+ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
+ struct inode *alloc_inode,
+ struct ocfs2_alloc_context *ac,
+ struct ocfs2_chain_list *cl)
+{
+ int status;
+ u32 bit_off, num_bits;
+ u64 bg_blkno;
+ struct buffer_head *bg_bh;
+ unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
+
+ status = ocfs2_claim_clusters(handle, ac,
+ le16_to_cpu(cl->cl_cpg), &bit_off,
+ &num_bits);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* setup the group */
+ bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
+ mlog(0, "new descriptor, record %u, at block %llu\n",
+ alloc_rec, (unsigned long long)bg_blkno);
+
+ bg_bh = sb_getblk(osb->sb, bg_blkno);
+ if (!bg_bh) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
+
+ status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
+ bg_blkno, num_bits, alloc_rec, cl);
+ if (status < 0) {
+ brelse(bg_bh);
+ mlog_errno(status);
+ }
+
+bail:
+ return status ? ERR_PTR(status) : bg_bh;
+}
+
+static int ocfs2_block_group_claim_bits(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ unsigned int min_bits,
+ u32 *bit_off, u32 *num_bits)
+{
+ int status = 0;
+
+ while (min_bits) {
+ status = ocfs2_claim_clusters(handle, ac, min_bits,
+ bit_off, num_bits);
+ if (status != -ENOSPC)
+ break;
+
+ min_bits >>= 1;
+ }
+
+ return status;
+}
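
The loop above is a simple exponential backoff on contiguity: every -ENOSPC halves the request until a claim succeeds, a different error appears, or the size reaches zero. The same pattern isolated in userspace (claim_with_backoff and try_claim are illustrative):

	#include <errno.h>

	static int claim_with_backoff(unsigned int min_bits,
				      int (*try_claim)(unsigned int bits,
						       void *arg),
				      void *arg)
	{
		int status = -ENOSPC;	/* returned if min_bits was 0 */

		while (min_bits) {
			status = try_claim(min_bits, arg);
			if (status != -ENOSPC)
				break;		/* success or hard error */
			min_bits >>= 1;		/* halve and retry */
		}
		return status;
	}
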
+
+static int ocfs2_block_group_grow_discontig(handle_t *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *bg_bh,
+ struct ocfs2_alloc_context *ac,
+ struct ocfs2_chain_list *cl,
+ unsigned int min_bits)
+{
+ int status;
+ struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
+ struct ocfs2_group_desc *bg =
+ (struct ocfs2_group_desc *)bg_bh->b_data;
+ unsigned int needed = le16_to_cpu(cl->cl_cpg) -
+ le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
+ u32 p_cpos, clusters;
+ u64 p_blkno;
+ struct ocfs2_extent_list *el = &bg->bg_list;
+
+ status = ocfs2_journal_access_gd(handle,
+ INODE_CACHE(alloc_inode),
+ bg_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ while ((needed > 0) && (le16_to_cpu(el->l_next_free_rec) <
+ le16_to_cpu(el->l_count))) {
+ if (min_bits > needed)
+ min_bits = needed;
+ status = ocfs2_block_group_claim_bits(osb, handle, ac,
+ min_bits, &p_cpos,
+ &clusters);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+ p_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cpos);
+ ocfs2_bg_discontig_add_extent(osb, bg, cl, p_blkno,
+ clusters);
+
+ min_bits = clusters;
+ needed = le16_to_cpu(cl->cl_cpg) -
+ le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
+ }
+
+ if (needed > 0) {
+ /*
+		 * We have used up all the extent records but still
+		 * can't fill up the cpg, so bail out.
+ */
+ status = -ENOSPC;
+ goto bail;
+ }
+
+ ocfs2_journal_dirty(handle, bg_bh);
+
+bail:
+ return status;
+}
+
+static void ocfs2_bg_alloc_cleanup(handle_t *handle,
+ struct ocfs2_alloc_context *cluster_ac,
+ struct inode *alloc_inode,
+ struct buffer_head *bg_bh)
+{
+ int i, ret;
+ struct ocfs2_group_desc *bg;
+ struct ocfs2_extent_list *el;
+ struct ocfs2_extent_rec *rec;
+
+ if (!bg_bh)
+ return;
+
+ bg = (struct ocfs2_group_desc *)bg_bh->b_data;
+ el = &bg->bg_list;
+ for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
+ rec = &el->l_recs[i];
+ ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
+ cluster_ac->ac_bh,
+ le64_to_cpu(rec->e_blkno),
+ le32_to_cpu(rec->e_leaf_clusters));
+ if (ret)
+ mlog_errno(ret);
+		/* Keep trying to free the remaining clusters even on error */
+ }
+
+ ocfs2_remove_from_cache(INODE_CACHE(alloc_inode), bg_bh);
+ brelse(bg_bh);
+}
+
+static struct buffer_head *
+ocfs2_block_group_alloc_discontig(handle_t *handle,
+ struct inode *alloc_inode,
+ struct ocfs2_alloc_context *ac,
+ struct ocfs2_chain_list *cl)
+{
+ int status;
+ u32 bit_off, num_bits;
+ u64 bg_blkno;
+ unsigned int min_bits = le16_to_cpu(cl->cl_cpg) >> 1;
+ struct buffer_head *bg_bh = NULL;
+ unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
+ struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
+
+ if (!ocfs2_supports_discontig_bg(osb)) {
+ status = -ENOSPC;
+ goto bail;
+ }
+
+ status = ocfs2_extend_trans(handle,
+ ocfs2_calc_bg_discontig_credits(osb->sb));
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /*
+ * We're going to be grabbing from multiple cluster groups.
+ * We don't have enough credits to relink them all, and the
+ * cluster groups will be staying in cache for the duration of
+ * this operation.
+ */
+ ac->ac_allow_chain_relink = 0;
+
+ /* Claim the first region */
+ status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
+ &bit_off, &num_bits);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+ min_bits = num_bits;
+
+ /* setup the group */
+ bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
+ mlog(0, "new descriptor, record %u, at block %llu\n",
+ alloc_rec, (unsigned long long)bg_blkno);
+
+ bg_bh = sb_getblk(osb->sb, bg_blkno);
+ if (!bg_bh) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
+
+ status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
+ bg_blkno, num_bits, alloc_rec, cl);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_block_group_grow_discontig(handle, alloc_inode,
+ bg_bh, ac, cl, min_bits);
+ if (status)
+ mlog_errno(status);
+
+bail:
+ if (status)
+ ocfs2_bg_alloc_cleanup(handle, ac, alloc_inode, bg_bh);
+ return status ? ERR_PTR(status) : bg_bh;
+}
+
/*
* We expect the block group allocator to already be locked.
*/
@@ -412,9 +680,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
struct ocfs2_chain_list *cl;
struct ocfs2_alloc_context *ac = NULL;
handle_t *handle = NULL;
- u32 bit_off, num_bits;
u16 alloc_rec;
- u64 bg_blkno;
struct buffer_head *bg_bh = NULL;
struct ocfs2_group_desc *bg;
@@ -447,44 +713,20 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
(unsigned long long)*last_alloc_group);
ac->ac_last_group = *last_alloc_group;
}
- status = ocfs2_claim_clusters(osb,
- handle,
- ac,
- le16_to_cpu(cl->cl_cpg),
- &bit_off,
- &num_bits);
- if (status < 0) {
+
+ bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
+ ac, cl);
+ if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
+ bg_bh = ocfs2_block_group_alloc_discontig(handle,
+ alloc_inode,
+ ac, cl);
+ if (IS_ERR(bg_bh)) {
+ status = PTR_ERR(bg_bh);
+ bg_bh = NULL;
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
-
- alloc_rec = ocfs2_find_smallest_chain(cl);
-
- /* setup the group */
- bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "new descriptor, record %u, at block %llu\n",
- alloc_rec, (unsigned long long)bg_blkno);
-
- bg_bh = sb_getblk(osb->sb, bg_blkno);
- if (!bg_bh) {
- status = -EIO;
- mlog_errno(status);
- goto bail;
- }
- ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
-
- status = ocfs2_block_group_fill(handle,
- alloc_inode,
- bg_bh,
- bg_blkno,
- alloc_rec,
- cl);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
bg = (struct ocfs2_group_desc *) bg_bh->b_data;
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
@@ -494,10 +736,12 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
goto bail;
}
+ alloc_rec = le16_to_cpu(bg->bg_chain);
le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
le16_to_cpu(bg->bg_free_bits_count));
- le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
- cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
+ le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
+ le16_to_cpu(bg->bg_bits));
+ cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg->bg_blkno);
if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
le16_add_cpu(&cl->cl_next_free_rec, 1);
@@ -506,11 +750,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, bh);
spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
@@ -760,7 +1000,7 @@ int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
status = ocfs2_reserve_suballoc_bits(osb, (*ac),
EXTENT_ALLOC_SYSTEM_INODE,
(u32)osb->slot_num, NULL,
- ALLOC_NEW_GROUP);
+ ALLOC_GROUPS_FROM_GLOBAL|ALLOC_NEW_GROUP);
if (status >= 0) {
@@ -946,11 +1186,7 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
status = ocfs2_reserve_local_alloc_bits(osb,
bits_wanted,
*ac);
- if (status == -EFBIG) {
- /* The local alloc window is outside ac_max_block.
- * use the main bitmap. */
- status = -ENOSPC;
- } else if ((status < 0) && (status != -ENOSPC)) {
+ if ((status < 0) && (status != -ENOSPC)) {
mlog_errno(status);
goto bail;
}
@@ -1033,8 +1269,7 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
struct buffer_head *bg_bh,
unsigned int bits_wanted,
unsigned int total_bits,
- u16 *bit_off,
- u16 *bits_found)
+ struct ocfs2_suballoc_result *res)
{
void *bitmap;
u16 best_offset, best_size;
@@ -1078,14 +1313,9 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
}
}
- /* XXX: I think the first clause is equivalent to the second
- * - jlbec */
- if (found == bits_wanted) {
- *bit_off = start - found;
- *bits_found = found;
- } else if (best_size) {
- *bit_off = best_offset;
- *bits_found = best_size;
+ if (best_size) {
+ res->sr_bit_offset = best_offset;
+ res->sr_bits = best_size;
} else {
status = -ENOSPC;
/* No error log here -- see the comment above
@@ -1129,16 +1359,10 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
}
le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
-
while(num_bits--)
ocfs2_set_bit(bit_off++, bitmap);
- status = ocfs2_journal_dirty(handle,
- group_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, group_bh);
bail:
mlog_exit(status);
@@ -1202,12 +1426,7 @@ static int ocfs2_relink_block_group(handle_t *handle,
}
prev_bg->bg_next_group = bg->bg_next_group;
-
- status = ocfs2_journal_dirty(handle, prev_bg_bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_rollback;
- }
+ ocfs2_journal_dirty(handle, prev_bg_bh);
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
bg_bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1217,12 +1436,7 @@ static int ocfs2_relink_block_group(handle_t *handle,
}
bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
-
- status = ocfs2_journal_dirty(handle, bg_bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_rollback;
- }
+ ocfs2_journal_dirty(handle, bg_bh);
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
fe_bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1232,14 +1446,8 @@ static int ocfs2_relink_block_group(handle_t *handle,
}
fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
+ ocfs2_journal_dirty(handle, fe_bh);
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_rollback;
- }
-
- status = 0;
out_rollback:
if (status < 0) {
fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
@@ -1263,14 +1471,13 @@ static int ocfs2_cluster_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
- u16 *bit_off, u16 *bits_found)
+ struct ocfs2_suballoc_result *res)
{
int search = -ENOSPC;
int ret;
u64 blkoff;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- u16 tmp_off, tmp_found;
unsigned int max_bits, gd_cluster_off;
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
@@ -1297,15 +1504,15 @@ static int ocfs2_cluster_group_search(struct inode *inode,
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
group_bh, bits_wanted,
- max_bits,
- &tmp_off, &tmp_found);
+ max_bits, res);
if (ret)
return ret;
if (max_block) {
blkoff = ocfs2_clusters_to_blocks(inode->i_sb,
gd_cluster_off +
- tmp_off + tmp_found);
+ res->sr_bit_offset +
+ res->sr_bits);
mlog(0, "Checking %llu against %llu\n",
(unsigned long long)blkoff,
(unsigned long long)max_block);
@@ -1317,16 +1524,14 @@ static int ocfs2_cluster_group_search(struct inode *inode,
* return success, but we still want to return
* -ENOSPC unless it found the minimum number
* of bits. */
- if (min_bits <= tmp_found) {
- *bit_off = tmp_off;
- *bits_found = tmp_found;
+ if (min_bits <= res->sr_bits)
search = 0; /* success */
- } else if (tmp_found) {
+ else if (res->sr_bits) {
/*
* Don't show bits which we'll be returning
* for allocation to the local alloc bitmap.
*/
- ocfs2_local_alloc_seen_free_bits(osb, tmp_found);
+ ocfs2_local_alloc_seen_free_bits(osb, res->sr_bits);
}
}
@@ -1337,7 +1542,7 @@ static int ocfs2_block_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
- u16 *bit_off, u16 *bits_found)
+ struct ocfs2_suballoc_result *res)
{
int ret = -ENOSPC;
u64 blkoff;
@@ -1350,10 +1555,10 @@ static int ocfs2_block_group_search(struct inode *inode,
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
group_bh, bits_wanted,
le16_to_cpu(bg->bg_bits),
- bit_off, bits_found);
+ res);
if (!ret && max_block) {
- blkoff = le64_to_cpu(bg->bg_blkno) + *bit_off +
- *bits_found;
+ blkoff = le64_to_cpu(bg->bg_blkno) +
+ res->sr_bit_offset + res->sr_bits;
mlog(0, "Checking %llu against %llu\n",
(unsigned long long)blkoff,
(unsigned long long)max_block);
@@ -1386,33 +1591,76 @@ static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
-
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, di_bh);
out:
return ret;
}
+static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res,
+ struct ocfs2_extent_rec *rec,
+ struct ocfs2_chain_list *cl)
+{
+ unsigned int bpc = le16_to_cpu(cl->cl_bpc);
+ unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
+ unsigned int bitcount = le32_to_cpu(rec->e_leaf_clusters) * bpc;
+
+ if (res->sr_bit_offset < bitoff)
+ return 0;
+ if (res->sr_bit_offset >= (bitoff + bitcount))
+ return 0;
+ res->sr_blkno = le64_to_cpu(rec->e_blkno) +
+ (res->sr_bit_offset - bitoff);
+ if ((res->sr_bit_offset + res->sr_bits) > (bitoff + bitcount))
+ res->sr_bits = (bitoff + bitcount) - res->sr_bit_offset;
+ return 1;
+}
+
+static void ocfs2_bg_discontig_fix_result(struct ocfs2_alloc_context *ac,
+ struct ocfs2_group_desc *bg,
+ struct ocfs2_suballoc_result *res)
+{
+ int i;
+ u64 bg_blkno = res->sr_bg_blkno; /* Save off */
+ struct ocfs2_extent_rec *rec;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
+ struct ocfs2_chain_list *cl = &di->id2.i_chain;
+
+ if (ocfs2_is_cluster_bitmap(ac->ac_inode)) {
+ res->sr_blkno = 0;
+ return;
+ }
+
+ res->sr_blkno = res->sr_bg_blkno + res->sr_bit_offset;
+ res->sr_bg_blkno = 0; /* Clear it for contig block groups */
+ if (!ocfs2_supports_discontig_bg(OCFS2_SB(ac->ac_inode->i_sb)) ||
+ !bg->bg_list.l_next_free_rec)
+ return;
+
+ for (i = 0; i < le16_to_cpu(bg->bg_list.l_next_free_rec); i++) {
+ rec = &bg->bg_list.l_recs[i];
+ if (ocfs2_bg_discontig_fix_by_rec(res, rec, cl)) {
+ res->sr_bg_blkno = bg_blkno; /* Restore */
+ break;
+ }
+ }
+}
+
static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
- u16 *bit_off,
- unsigned int *num_bits,
- u64 gd_blkno,
+ struct ocfs2_suballoc_result *res,
u16 *bits_left)
{
int ret;
- u16 found;
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *gd;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
struct inode *alloc_inode = ac->ac_inode;
- ret = ocfs2_read_group_descriptor(alloc_inode, di, gd_blkno,
- &group_bh);
+ ret = ocfs2_read_group_descriptor(alloc_inode, di,
+ res->sr_bg_blkno, &group_bh);
if (ret < 0) {
mlog_errno(ret);
return ret;
@@ -1420,17 +1668,18 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
gd = (struct ocfs2_group_desc *) group_bh->b_data;
ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
- ac->ac_max_block, bit_off, &found);
+ ac->ac_max_block, res);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
- *num_bits = found;
+ if (!ret)
+ ocfs2_bg_discontig_fix_result(ac, gd, res);
ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
- *num_bits,
+ res->sr_bits,
le16_to_cpu(gd->bg_chain));
if (ret < 0) {
mlog_errno(ret);
@@ -1438,7 +1687,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
}
ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
- *bit_off, *num_bits);
+ res->sr_bit_offset, res->sr_bits);
if (ret < 0)
mlog_errno(ret);
@@ -1454,13 +1703,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
- u16 *bit_off,
- unsigned int *num_bits,
- u64 *bg_blkno,
+ struct ocfs2_suballoc_result *res,
u16 *bits_left)
{
int status;
- u16 chain, tmp_bits;
+ u16 chain;
u32 tmp_used;
u64 next_group;
struct inode *alloc_inode = ac->ac_inode;
@@ -1489,8 +1736,8 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
* the 1st group with any empty bits. */
while ((status = ac->ac_group_search(alloc_inode, group_bh,
bits_wanted, min_bits,
- ac->ac_max_block, bit_off,
- &tmp_bits)) == -ENOSPC) {
+ ac->ac_max_block,
+ res)) == -ENOSPC) {
if (!bg->bg_next_group)
break;
@@ -1515,11 +1762,14 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
}
mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
- tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));
+ res->sr_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));
- *num_bits = tmp_bits;
+ res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno);
+
+ BUG_ON(res->sr_bits == 0);
+ if (!status)
+ ocfs2_bg_discontig_fix_result(ac, bg, res);
- BUG_ON(*num_bits == 0);
/*
* Keep track of previous block descriptor read. When
@@ -1536,7 +1786,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
*/
if (ac->ac_allow_chain_relink &&
(prev_group_bh) &&
- (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
+ (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
status = ocfs2_relink_block_group(handle, alloc_inode,
ac->ac_bh, group_bh,
prev_group_bh, chain);
@@ -1558,31 +1808,24 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
}
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
- fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
- le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));
-
- status = ocfs2_journal_dirty(handle,
- ac->ac_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
+ le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
+ ocfs2_journal_dirty(handle, ac->ac_bh);
status = ocfs2_block_group_set_bits(handle,
alloc_inode,
bg,
group_bh,
- *bit_off,
- *num_bits);
+ res->sr_bit_offset,
+ res->sr_bits);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
+ mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
(unsigned long long)le64_to_cpu(fe->i_blkno));
- *bg_blkno = le64_to_cpu(bg->bg_blkno);
*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
brelse(group_bh);
@@ -1593,19 +1836,15 @@ bail:
}
/* will give out up to bits_wanted contiguous bits. */
-static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
- struct ocfs2_alloc_context *ac,
+static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
- u16 *bit_off,
- unsigned int *num_bits,
- u64 *bg_blkno)
+ struct ocfs2_suballoc_result *res)
{
int status;
u16 victim, i;
u16 bits_left = 0;
- u64 hint_blkno = ac->ac_last_group;
struct ocfs2_chain_list *cl;
struct ocfs2_dinode *fe;
@@ -1623,7 +1862,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
le32_to_cpu(fe->id1.bitmap1.i_total)) {
- ocfs2_error(osb->sb, "Chain allocator dinode %llu has %u used "
+ ocfs2_error(ac->ac_inode->i_sb,
+ "Chain allocator dinode %llu has %u used "
"bits but only %u total.",
(unsigned long long)le64_to_cpu(fe->i_blkno),
le32_to_cpu(fe->id1.bitmap1.i_used),
@@ -1632,22 +1872,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
goto bail;
}
- if (hint_blkno) {
+ res->sr_bg_blkno = ac->ac_last_group;
+ if (res->sr_bg_blkno) {
/* Attempt to short-circuit the usual search mechanism
* by jumping straight to the most recently used
* allocation group. This helps us maintain some
* contiguousness across allocations. */
status = ocfs2_search_one_group(ac, handle, bits_wanted,
- min_bits, bit_off, num_bits,
- hint_blkno, &bits_left);
- if (!status) {
- /* Be careful to update *bg_blkno here as the
- * caller is expecting it to be filled in, and
- * ocfs2_search_one_group() won't do that for
- * us. */
- *bg_blkno = hint_blkno;
+ min_bits, res, &bits_left);
+ if (!status)
goto set_hint;
- }
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
@@ -1660,8 +1894,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
ac->ac_chain = victim;
ac->ac_allow_chain_relink = 1;
- status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
- num_bits, bg_blkno, &bits_left);
+ status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
+ res, &bits_left);
if (!status)
goto set_hint;
if (status < 0 && status != -ENOSPC) {
@@ -1685,8 +1919,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
ac->ac_chain = i;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
- bit_off, num_bits, bg_blkno,
- &bits_left);
+ res, &bits_left);
if (!status)
break;
if (status < 0 && status != -ENOSPC) {
@@ -1703,7 +1936,7 @@ set_hint:
if (bits_left < min_bits)
ac->ac_last_group = 0;
else
- ac->ac_last_group = *bg_blkno;
+ ac->ac_last_group = res->sr_bg_blkno;
}
bail:
@@ -1711,37 +1944,37 @@ bail:
return status;
}
-int ocfs2_claim_metadata(struct ocfs2_super *osb,
- handle_t *handle,
+int ocfs2_claim_metadata(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 bits_wanted,
+ u64 *suballoc_loc,
u16 *suballoc_bit_start,
unsigned int *num_bits,
u64 *blkno_start)
{
int status;
- u64 bg_blkno;
+ struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
BUG_ON(!ac);
BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
- status = ocfs2_claim_suballoc_bits(osb,
- ac,
+ status = ocfs2_claim_suballoc_bits(ac,
handle,
bits_wanted,
1,
- suballoc_bit_start,
- num_bits,
- &bg_blkno);
+ &res);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- atomic_inc(&osb->alloc_stats.bg_allocs);
+ atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
- *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
- ac->ac_bits_given += (*num_bits);
+ *suballoc_loc = res.sr_bg_blkno;
+ *suballoc_bit_start = res.sr_bit_offset;
+ *blkno_start = res.sr_blkno;
+ ac->ac_bits_given += res.sr_bits;
+ *num_bits = res.sr_bits;
status = 0;
bail:
mlog_exit(status);
@@ -1749,10 +1982,10 @@ bail:
}
static void ocfs2_init_inode_ac_group(struct inode *dir,
- struct buffer_head *parent_fe_bh,
+ struct buffer_head *parent_di_bh,
struct ocfs2_alloc_context *ac)
{
- struct ocfs2_dinode *fe = (struct ocfs2_dinode *)parent_fe_bh->b_data;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_di_bh->b_data;
/*
* Try to allocate inodes from some specific group.
*
@@ -1766,10 +1999,14 @@ static void ocfs2_init_inode_ac_group(struct inode *dir,
if (OCFS2_I(dir)->ip_last_used_group &&
OCFS2_I(dir)->ip_last_used_slot == ac->ac_alloc_slot)
ac->ac_last_group = OCFS2_I(dir)->ip_last_used_group;
- else if (le16_to_cpu(fe->i_suballoc_slot) == ac->ac_alloc_slot)
- ac->ac_last_group = ocfs2_which_suballoc_group(
- le64_to_cpu(fe->i_blkno),
- le16_to_cpu(fe->i_suballoc_bit));
+ else if (le16_to_cpu(di->i_suballoc_slot) == ac->ac_alloc_slot) {
+ if (di->i_suballoc_loc)
+ ac->ac_last_group = le64_to_cpu(di->i_suballoc_loc);
+ else
+ ac->ac_last_group = ocfs2_which_suballoc_group(
+ le64_to_cpu(di->i_blkno),
+ le16_to_cpu(di->i_suballoc_bit));
+ }
}
static inline void ocfs2_save_inode_ac_group(struct inode *dir,
@@ -1779,17 +2016,16 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
}
-int ocfs2_claim_new_inode(struct ocfs2_super *osb,
- handle_t *handle,
+int ocfs2_claim_new_inode(handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *ac,
+ u64 *suballoc_loc,
u16 *suballoc_bit,
u64 *fe_blkno)
{
int status;
- unsigned int num_bits;
- u64 bg_blkno;
+ struct ocfs2_suballoc_result res;
mlog_entry_void();
@@ -1800,23 +2036,22 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
- status = ocfs2_claim_suballoc_bits(osb,
- ac,
+ status = ocfs2_claim_suballoc_bits(ac,
handle,
1,
1,
- suballoc_bit,
- &num_bits,
- &bg_blkno);
+ &res);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- atomic_inc(&osb->alloc_stats.bg_allocs);
+ atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
- BUG_ON(num_bits != 1);
+ BUG_ON(res.sr_bits != 1);
- *fe_blkno = bg_blkno + (u64) (*suballoc_bit);
+ *suballoc_loc = res.sr_bg_blkno;
+ *suballoc_bit = res.sr_bit_offset;
+ *fe_blkno = res.sr_blkno;
ac->ac_bits_given++;
ocfs2_save_inode_ac_group(dir, ac);
status = 0;
@@ -1886,8 +2121,7 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode,
* contig. allocation, set to '1' to indicate we can deal with extents
* of any size.
*/
-int __ocfs2_claim_clusters(struct ocfs2_super *osb,
- handle_t *handle,
+int __ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 max_clusters,
@@ -1896,8 +2130,8 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
{
int status;
unsigned int bits_wanted = max_clusters;
- u64 bg_blkno = 0;
- u16 bg_bit_off;
+ struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
+ struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb);
mlog_entry_void();
@@ -1907,6 +2141,8 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
&& ac->ac_which != OCFS2_AC_USE_MAIN);
if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
+ WARN_ON(min_clusters > 1);
+
status = ocfs2_claim_local_alloc_bits(osb,
handle,
ac,
@@ -1929,20 +2165,19 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
if (bits_wanted > (osb->bitmap_cpg - 1))
bits_wanted = osb->bitmap_cpg - 1;
- status = ocfs2_claim_suballoc_bits(osb,
- ac,
+ status = ocfs2_claim_suballoc_bits(ac,
handle,
bits_wanted,
min_clusters,
- &bg_bit_off,
- num_clusters,
- &bg_blkno);
+ &res);
if (!status) {
+ BUG_ON(res.sr_blkno); /* cluster alloc can't set */
*cluster_start =
ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
- bg_blkno,
- bg_bit_off);
+ res.sr_bg_blkno,
+ res.sr_bit_offset);
atomic_inc(&osb->alloc_stats.bitmap_data);
+ *num_clusters = res.sr_bits;
}
}
if (status < 0) {
@@ -1958,8 +2193,7 @@ bail:
return status;
}
-int ocfs2_claim_clusters(struct ocfs2_super *osb,
- handle_t *handle,
+int ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 *cluster_start,
@@ -1967,7 +2201,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
{
unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
- return __ocfs2_claim_clusters(osb, handle, ac, min_clusters,
+ return __ocfs2_claim_clusters(handle, ac, min_clusters,
bits_wanted, cluster_start, num_clusters);
}
@@ -2023,9 +2257,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
if (undo_fn)
jbd_unlock_bh_state(group_bh);
- status = ocfs2_journal_dirty(handle, group_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, group_bh);
bail:
return status;
}
@@ -2092,12 +2324,7 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
count);
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
-
- status = ocfs2_journal_dirty(handle, alloc_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, alloc_bh);
bail:
brelse(group_bh);
@@ -2126,6 +2353,8 @@ int ocfs2_free_dinode(handle_t *handle,
u16 bit = le16_to_cpu(di->i_suballoc_bit);
u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
+ if (di->i_suballoc_loc)
+ bg_blkno = le64_to_cpu(di->i_suballoc_loc);
return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
inode_alloc_bh, bit, bg_blkno, 1);
}
@@ -2395,7 +2624,7 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
struct buffer_head *alloc_bh, u64 blkno,
u16 bit, int *res)
{
- struct ocfs2_dinode *alloc_fe;
+ struct ocfs2_dinode *alloc_di;
struct ocfs2_group_desc *group;
struct buffer_head *group_bh = NULL;
u64 bg_blkno;
@@ -2404,17 +2633,20 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
mlog_entry("blkno: %llu bit: %u\n", (unsigned long long)blkno,
(unsigned int)bit);
- alloc_fe = (struct ocfs2_dinode *)alloc_bh->b_data;
- if ((bit + 1) > ocfs2_bits_per_group(&alloc_fe->id2.i_chain)) {
+ alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data;
+ if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) {
mlog(ML_ERROR, "suballoc bit %u out of range of %u\n",
(unsigned int)bit,
- ocfs2_bits_per_group(&alloc_fe->id2.i_chain));
+ ocfs2_bits_per_group(&alloc_di->id2.i_chain));
status = -EINVAL;
goto bail;
}
- bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
- status = ocfs2_read_group_descriptor(suballoc, alloc_fe, bg_blkno,
+ if (alloc_di->i_suballoc_loc)
+ bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
+ else
+ bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+ status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
&group_bh);
if (status < 0) {
mlog(ML_ERROR, "read group %llu failed %d\n",
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index e0f46df..a017dd3 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -26,13 +26,14 @@
#ifndef _CHAINALLOC_H_
#define _CHAINALLOC_H_
+struct ocfs2_suballoc_result;
typedef int (group_search_t)(struct inode *,
struct buffer_head *,
u32, /* bits_wanted */
u32, /* min_bits */
u64, /* max_block */
- u16 *, /* *bit_off */
- u16 *); /* *bits_found */
+ struct ocfs2_suballoc_result *);
+ /* found bits */
struct ocfs2_alloc_context {
struct inode *ac_inode; /* which bitmap are we allocating from? */
@@ -54,6 +55,8 @@ struct ocfs2_alloc_context {
u64 ac_last_group;
u64 ac_max_block; /* Highest block number to allocate. 0 is
the same as ~0 - unlimited */
+
+ struct ocfs2_alloc_reservation *ac_resv;
};
void ocfs2_init_steal_slots(struct ocfs2_super *osb);
@@ -80,22 +83,21 @@ int ocfs2_reserve_clusters(struct ocfs2_super *osb,
u32 bits_wanted,
struct ocfs2_alloc_context **ac);
-int ocfs2_claim_metadata(struct ocfs2_super *osb,
- handle_t *handle,
+int ocfs2_claim_metadata(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 bits_wanted,
+ u64 *suballoc_loc,
u16 *suballoc_bit_start,
u32 *num_bits,
u64 *blkno_start);
-int ocfs2_claim_new_inode(struct ocfs2_super *osb,
- handle_t *handle,
+int ocfs2_claim_new_inode(handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *ac,
+ u64 *suballoc_loc,
u16 *suballoc_bit,
u64 *fe_blkno);
-int ocfs2_claim_clusters(struct ocfs2_super *osb,
- handle_t *handle,
+int ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 *cluster_start,
@@ -104,8 +106,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
* Use this variant of ocfs2_claim_clusters to specify a maximum
* number of clusters smaller than the allocation reserved.
*/
-int __ocfs2_claim_clusters(struct ocfs2_super *osb,
- handle_t *handle,
+int __ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 max_clusters,
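For callers of these prototypes the visible change is twofold: the ocfs2_super argument disappears (it is now derived from ac->ac_inode), and allocators report the backing block group through the new suballoc_loc out-parameter. A hypothetical call site before and after (local variable names invented for illustration; compare the real conversion in the xattr.c hunk below):

    /* before: */
    status = ocfs2_claim_metadata(osb, handle, ac, 1,
                                  &suballoc_bit_start, &num_got, &first_blkno);

    /* after: suballoc_loc receives the block group that backed the
     * allocation, so it can be stamped into fields like xb_suballoc_loc: */
    status = ocfs2_claim_metadata(handle, ac, 1, &suballoc_loc,
                                  &suballoc_bit_start, &num_got, &first_blkno);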
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index dee0319..2c26ce2 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -94,7 +94,9 @@ struct mount_options
unsigned long mount_opt;
unsigned int atime_quantum;
signed short slot;
- unsigned int localalloc_opt;
+ int localalloc_opt;
+ unsigned int resv_level;
+ int dir_resv_level;
char cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
};
@@ -176,6 +178,8 @@ enum {
Opt_noacl,
Opt_usrquota,
Opt_grpquota,
+ Opt_resv_level,
+ Opt_dir_resv_level,
Opt_err,
};
@@ -202,6 +206,8 @@ static const match_table_t tokens = {
{Opt_noacl, "noacl"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
+ {Opt_resv_level, "resv_level=%u"},
+ {Opt_dir_resv_level, "dir_resv_level=%u"},
{Opt_err, NULL}
};
@@ -932,12 +938,16 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
int type;
struct inode *inode;
struct super_block *sb = osb->sb;
+ struct ocfs2_mem_dqinfo *oinfo;
/* We mostly ignore errors in this function because there's not much
* we can do when we see them */
for (type = 0; type < MAXQUOTAS; type++) {
if (!sb_has_quota_loaded(sb, type))
continue;
+ /* Cancel periodic syncing before we grab dqonoff_mutex */
+ oinfo = sb_dqinfo(sb, type)->dqi_priv;
+ cancel_delayed_work_sync(&oinfo->dqi_sync_work);
inode = igrab(sb->s_dquot.files[type]);
/* Turn off quotas. This will remove all dquot structures from
* memory and so they will be automatically synced to global
@@ -1028,8 +1038,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
osb->osb_commit_interval = parsed_options.commit_interval;
- osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt);
- osb->local_alloc_bits = osb->local_alloc_default_bits;
+
+ ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt);
+ osb->osb_resv_level = parsed_options.resv_level;
+ if (parsed_options.dir_resv_level == -1)
+ osb->osb_dir_resv_level = parsed_options.resv_level;
+ else
+ osb->osb_dir_resv_level = parsed_options.dir_resv_level;
status = ocfs2_verify_userspace_stack(osb, &parsed_options);
if (status)
@@ -1285,11 +1301,13 @@ static int ocfs2_parse_options(struct super_block *sb,
options ? options : "(none)");
mopt->commit_interval = 0;
- mopt->mount_opt = 0;
+ mopt->mount_opt = OCFS2_MOUNT_NOINTR;
mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
mopt->slot = OCFS2_INVALID_SLOT;
- mopt->localalloc_opt = OCFS2_DEFAULT_LOCAL_ALLOC_SIZE;
+ mopt->localalloc_opt = -1;
mopt->cluster_stack[0] = '\0';
+ mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL;
+ mopt->dir_resv_level = -1;
if (!options) {
status = 1;
@@ -1380,7 +1398,7 @@ static int ocfs2_parse_options(struct super_block *sb,
status = 0;
goto bail;
}
- if (option >= 0 && (option <= ocfs2_local_alloc_size(sb) * 8))
+ if (option >= 0)
mopt->localalloc_opt = option;
break;
case Opt_localflocks:
@@ -1433,6 +1451,28 @@ static int ocfs2_parse_options(struct super_block *sb,
mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
break;
+ case Opt_resv_level:
+ if (is_remount)
+ break;
+ if (match_int(&args[0], &option)) {
+ status = 0;
+ goto bail;
+ }
+ if (option >= OCFS2_MIN_RESV_LEVEL &&
+ option < OCFS2_MAX_RESV_LEVEL)
+ mopt->resv_level = option;
+ break;
+ case Opt_dir_resv_level:
+ if (is_remount)
+ break;
+ if (match_int(&args[0], &option)) {
+ status = 0;
+ goto bail;
+ }
+ if (option >= OCFS2_MIN_RESV_LEVEL &&
+ option < OCFS2_MAX_RESV_LEVEL)
+ mopt->dir_resv_level = option;
+ break;
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
@@ -1487,7 +1527,7 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
(unsigned) (osb->osb_commit_interval / HZ));
local_alloc_megs = osb->local_alloc_bits >> (20 - osb->s_clustersize_bits);
- if (local_alloc_megs != OCFS2_DEFAULT_LOCAL_ALLOC_SIZE)
+ if (local_alloc_megs != ocfs2_la_default_mb(osb))
seq_printf(s, ",localalloc=%d", local_alloc_megs);
if (opts & OCFS2_MOUNT_LOCALFLOCKS)
@@ -1514,6 +1554,12 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
else
seq_printf(s, ",noacl");
+ if (osb->osb_resv_level != OCFS2_DEFAULT_RESV_LEVEL)
+ seq_printf(s, ",resv_level=%d", osb->osb_resv_level);
+
+ if (osb->osb_dir_resv_level != osb->osb_resv_level)
+ seq_printf(s, ",dir_resv_level=%d", osb->osb_resv_level);
+
return 0;
}
@@ -1688,6 +1734,8 @@ static void ocfs2_inode_init_once(void *data)
oi->ip_blkno = 0ULL;
oi->ip_clusters = 0;
+ ocfs2_resv_init_once(&oi->ip_la_data_resv);
+
ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
ocfs2_lock_res_init_once(&oi->ip_open_lockres);
@@ -2042,6 +2090,12 @@ static int ocfs2_initialize_super(struct super_block *sb,
init_waitqueue_head(&osb->osb_mount_event);
+ status = ocfs2_resmap_init(osb, &osb->osb_la_resmap);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL);
if (!osb->vol_label) {
mlog(ML_ERROR, "unable to alloc vol label\n");
@@ -2224,9 +2278,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno;
+ osb->osb_clusters_at_boot = OCFS2_I(inode)->ip_clusters;
iput(inode);
- osb->bitmap_cpg = ocfs2_group_bitmap_size(sb) * 8;
+ osb->bitmap_cpg = ocfs2_group_bitmap_size(sb, 0,
+ osb->s_feature_incompat) * 8;
status = ocfs2_init_slot_info(osb);
if (status < 0) {
@@ -2509,5 +2565,25 @@ void __ocfs2_abort(struct super_block* sb,
ocfs2_handle_error(sb);
}
+/*
+ * These helpers return void because in-kernel sigprocmask() only
+ * fails when the SIG_* argument is wrong.
+ */
+void ocfs2_block_signals(sigset_t *oldset)
+{
+ int rc;
+ sigset_t blocked;
+
+ sigfillset(&blocked);
+ rc = sigprocmask(SIG_BLOCK, &blocked, oldset);
+ BUG_ON(rc);
+}
+
+void ocfs2_unblock_signals(sigset_t *oldset)
+{
+ int rc = sigprocmask(SIG_SETMASK, oldset, NULL);
+ BUG_ON(rc);
+}
+
module_init(ocfs2_init);
module_exit(ocfs2_exit);
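Two defaults in the hunks above are easy to misread: localalloc_opt and dir_resv_level are now initialized to -1 so that "not given on the command line" is distinguishable from an explicit value, and ocfs2_fill_super() resolves the directory level afterwards. Condensed, under that reading of the parsing code (parsed_* stand in for the mount_options fields above):

    int resv_level = parsed_resv_level;     /* OCFS2_DEFAULT_RESV_LEVEL unless set */
    int dir_resv_level = (parsed_dir_resv_level == -1)
                            ? resv_level            /* follow resv_level */
                            : parsed_dir_resv_level;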
diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h
index 783f527..40c7de0 100644
--- a/fs/ocfs2/super.h
+++ b/fs/ocfs2/super.h
@@ -45,4 +45,11 @@ void __ocfs2_abort(struct super_block *sb,
#define ocfs2_abort(sb, fmt, args...) __ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##args)
+/*
+ * These helpers return void because in-kernel sigprocmask() only
+ * fails when the SIG_* argument is wrong.
+ */
+void ocfs2_block_signals(sigset_t *oldset);
+void ocfs2_unblock_signals(sigset_t *oldset);
+
#endif /* OCFS2_SUPER_H */
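A plausible usage of the pair declared above; the critical section is invented for illustration:

    sigset_t oldset;

    ocfs2_block_signals(&oldset);           /* mask all signals */
    /* ... work that must not be interrupted by signal delivery ... */
    ocfs2_unblock_signals(&oldset);         /* restore the caller's mask */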
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3e77730..98ee6c4 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -79,6 +79,7 @@ struct ocfs2_xattr_set_ctxt {
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_alloc_context *data_ac;
struct ocfs2_cached_dealloc_ctxt dealloc;
+ int set_abort;
};
#define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root))
@@ -739,11 +740,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
goto leave;
}
- status = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, vb->vb_bh);
clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;
@@ -786,12 +783,7 @@ static int __ocfs2_remove_xattr_range(struct inode *inode,
}
le32_add_cpu(&vb->vb_xv->xr_clusters, -len);
-
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, vb->vb_bh);
if (ext_flags & OCFS2_EXT_REFCOUNTED)
ret = ocfs2_decrease_refcount(inode, handle,
@@ -1374,11 +1366,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode,
memset(bh->b_data + cp_len, 0,
blocksize - cp_len);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, bh);
brelse(bh);
bh = NULL;
@@ -2148,15 +2136,19 @@ alloc_value:
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
if (rc < 0) {
- /*
- * If we tried to grow an existing external value,
- * ocfs2_xa_cleanup_value_truncate() is going to
- * let it stand. We have to restore its original
- * value size.
- */
- loc->xl_entry->xe_value_size = orig_value_size;
+ ctxt->set_abort = 1;
ocfs2_xa_cleanup_value_truncate(loc, "growing",
orig_clusters);
+ /*
+ * If we were growing an existing value,
+ * ocfs2_xa_cleanup_value_truncate() won't remove
+ * the entry. We need to restore the original value
+ * size.
+ */
+ if (loc->xl_entry) {
+ BUG_ON(!orig_value_size);
+ loc->xl_entry->xe_value_size = orig_value_size;
+ }
mlog_errno(rc);
}
}
@@ -2479,7 +2471,10 @@ static int ocfs2_xattr_free_block(struct inode *inode,
xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
blk = le64_to_cpu(xb->xb_blkno);
bit = le16_to_cpu(xb->xb_suballoc_bit);
- bg_blkno = ocfs2_which_suballoc_group(blk, bit);
+ if (xb->xb_suballoc_loc)
+ bg_blkno = le64_to_cpu(xb->xb_suballoc_loc);
+ else
+ bg_blkno = ocfs2_which_suballoc_group(blk, bit);
xb_alloc_inode = ocfs2_get_system_file_inode(osb,
EXTENT_ALLOC_SYSTEM_INODE,
@@ -2594,9 +2589,7 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, di_bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
@@ -2724,9 +2717,7 @@ static int ocfs2_xattr_ibody_init(struct inode *inode,
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
- ret = ocfs2_journal_dirty(ctxt->handle, di_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(ctxt->handle, di_bh);
out:
return ret;
@@ -2846,9 +2837,8 @@ static int ocfs2_create_xattr_block(struct inode *inode,
int ret;
u16 suballoc_bit_start;
u32 num_got;
- u64 first_blkno;
+ u64 suballoc_loc, first_blkno;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *new_bh = NULL;
struct ocfs2_xattr_block *xblk;
@@ -2859,9 +2849,9 @@ static int ocfs2_create_xattr_block(struct inode *inode,
goto end;
}
- ret = ocfs2_claim_metadata(osb, ctxt->handle, ctxt->meta_ac, 1,
- &suballoc_bit_start, &num_got,
- &first_blkno);
+ ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1,
+ &suballoc_loc, &suballoc_bit_start,
+ &num_got, &first_blkno);
if (ret < 0) {
mlog_errno(ret);
goto end;
@@ -2883,8 +2873,10 @@ static int ocfs2_create_xattr_block(struct inode *inode,
memset(xblk, 0, inode->i_sb->s_blocksize);
strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
+ xblk->xb_suballoc_loc = cpu_to_le64(suballoc_loc);
xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
- xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
+ xblk->xb_fs_generation =
+ cpu_to_le32(OCFS2_SB(inode->i_sb)->fs_generation);
xblk->xb_blkno = cpu_to_le64(first_blkno);
if (indexed) {
struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
@@ -2956,7 +2948,7 @@ static int ocfs2_xattr_block_set(struct inode *inode,
ret = ocfs2_xa_set(&loc, xi, ctxt);
if (!ret)
xs->here = loc.xl_entry;
- else if (ret != -ENOSPC)
+ else if ((ret != -ENOSPC) || ctxt->set_abort)
goto end;
else {
ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
@@ -3312,14 +3304,13 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
goto out;
}
- ret = ocfs2_extend_trans(ctxt->handle, credits +
- ctxt->handle->h_buffer_credits);
+ ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
- } else if (ret == -ENOSPC) {
+ } else if ((ret == -ENOSPC) && !ctxt->set_abort) {
if (di->i_xattr_loc && !xbs->xattr_bh) {
ret = ocfs2_xattr_block_find(inode,
xi->xi_name_index,
@@ -3343,8 +3334,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
goto out;
}
- ret = ocfs2_extend_trans(ctxt->handle, credits +
- ctxt->handle->h_buffer_credits);
+ ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3378,8 +3368,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
goto out;
}
- ret = ocfs2_extend_trans(ctxt->handle, credits +
- ctxt->handle->h_buffer_credits);
+ ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
@@ -4249,7 +4238,6 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
u32 bit_off, len;
u64 blkno;
handle_t *handle = ctxt->handle;
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct buffer_head *xb_bh = xs->xattr_bh;
struct ocfs2_xattr_block *xb =
@@ -4277,7 +4265,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
goto out;
}
- ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac,
+ ret = __ocfs2_claim_clusters(handle, ctxt->data_ac,
1, 1, &bit_off, &len);
if (ret) {
mlog_errno(ret);
@@ -4887,8 +4875,7 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
* We need to update the first bucket of the old extent and all
* the buckets going to the new extent.
*/
- credits = ((num_buckets + 1) * blks_per_bucket) +
- handle->h_buffer_credits;
+ credits = ((num_buckets + 1) * blks_per_bucket);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
@@ -4958,7 +4945,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode,
u32 *first_hash)
{
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
- int ret, credits = 2 * blk_per_bucket + handle->h_buffer_credits;
+ int ret, credits = 2 * blk_per_bucket;
BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
@@ -5099,7 +5086,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
goto leave;
}
- ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac, 1,
+ ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1,
clusters_to_add, &bit_off, &num_bits);
if (ret < 0) {
if (ret != -ENOSPC)
@@ -5153,9 +5140,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
goto leave;
}
- ret = ocfs2_journal_dirty(handle, root_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, root_bh);
leave:
return ret;
@@ -5200,8 +5185,7 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode,
* existing bucket. Then we add the last existing bucket, the
* new bucket, and the first bucket (3 * blk_per_bucket).
*/
- credits = (end_blk - target_blk) + (3 * blk_per_bucket) +
- handle->h_buffer_credits;
+ credits = (end_blk - target_blk) + (3 * blk_per_bucket);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
@@ -5477,12 +5461,7 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
}
le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
-
- ret = ocfs2_journal_dirty(handle, root_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, root_bh);
ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
if (ret)
@@ -6935,7 +6914,7 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
goto out;
}
- ret = ocfs2_claim_clusters(osb, handle, data_ac,
+ ret = ocfs2_claim_clusters(handle, data_ac,
len, &p_cluster, &num_clusters);
if (ret) {
mlog_errno(ret);
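The new ctxt->set_abort flag makes a failed value truncate terminal: the -ENOSPC paths in ocfs2_xattr_block_set() and __ocfs2_xattr_set_handle() above now refuse to fall back to a larger xattr container once it is set. A standalone demo of the pattern (all names invented for the demo):

    #include <errno.h>
    #include <stdio.h>

    struct set_ctxt { int set_abort; };

    /* Stand-in for ocfs2_xa_set(): a plain -ENOSPC invites a fall-back,
     * but a truncate failure raises set_abort first. */
    static int xa_set_demo(struct set_ctxt *ctxt, int truncate_fails)
    {
            if (truncate_fails)
                    ctxt->set_abort = 1;
            return -ENOSPC;
    }

    int main(void)
    {
            struct set_ctxt ctxt = { 0 };
            int ret = xa_set_demo(&ctxt, 1);

            if (ret == -ENOSPC && !ctxt.set_abort)
                    printf("fall back to an indexed xattr block\n");
            else
                    printf("abort: ret=%d\n", ret);  /* path taken here */
            return 0;
    }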
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index c82af6a..b44bb83 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -3,7 +3,6 @@
* Copyright (C) 2006 Bob Copeland <me@bobcopeland.com>
* Released under GPL v2.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/fs/proc/array.c b/fs/proc/array.c
index e51f2ec..885ab55 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -81,7 +81,6 @@
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
-#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -495,7 +494,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
rsslim,
mm ? mm->start_code : 0,
mm ? mm->end_code : 0,
- (permitted && mm) ? task->stack_start : 0,
+ (permitted && mm) ? mm->start_stack : 0,
esp,
eip,
/* The signal information here is obsolete.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8418fcc..c7f9f23 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -730,6 +730,7 @@ out_no_task:
static const struct file_operations proc_info_file_operations = {
.read = proc_info_read,
+ .llseek = generic_file_llseek,
};
static int proc_single_show(struct seq_file *m, void *v)
@@ -987,6 +988,7 @@ out_no_task:
static const struct file_operations proc_environ_operations = {
.read = environ_read,
+ .llseek = generic_file_llseek,
};
static ssize_t oom_adjust_read(struct file *file, char __user *buf,
@@ -1060,6 +1062,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
static const struct file_operations proc_oom_adjust_operations = {
.read = oom_adjust_read,
.write = oom_adjust_write,
+ .llseek = generic_file_llseek,
};
#ifdef CONFIG_AUDITSYSCALL
@@ -1131,6 +1134,7 @@ out_free_page:
static const struct file_operations proc_loginuid_operations = {
.read = proc_loginuid_read,
.write = proc_loginuid_write,
+ .llseek = generic_file_llseek,
};
static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
@@ -1151,6 +1155,7 @@ static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
static const struct file_operations proc_sessionid_operations = {
.read = proc_sessionid_read,
+ .llseek = generic_file_llseek,
};
#endif
@@ -1202,6 +1207,7 @@ static ssize_t proc_fault_inject_write(struct file * file,
static const struct file_operations proc_fault_inject_operations = {
.read = proc_fault_inject_read,
.write = proc_fault_inject_write,
+ .llseek = generic_file_llseek,
};
#endif
@@ -1943,7 +1949,7 @@ static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
}
static const struct file_operations proc_fdinfo_file_operations = {
- .open = nonseekable_open,
+ .open = nonseekable_open,
.read = proc_fdinfo_read,
};
@@ -2227,6 +2233,7 @@ out_no_task:
static const struct file_operations proc_pid_attr_operations = {
.read = proc_pid_attr_read,
.write = proc_pid_attr_write,
+ .llseek = generic_file_llseek,
};
static const struct pid_entry attr_dir_stuff[] = {
@@ -2347,6 +2354,7 @@ static ssize_t proc_coredump_filter_write(struct file *file,
static const struct file_operations proc_coredump_filter_operations = {
.read = proc_coredump_filter_read,
.write = proc_coredump_filter_write,
+ .llseek = generic_file_llseek,
};
#endif
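The recurring one-liner in these proc hunks gives each read-only file an explicit .llseek instead of leaving seek behavior to the kernel's default fallback. The resulting shape, with proc_info_read standing in for any of the handlers above:

    static const struct file_operations example_proc_operations = {
            .read   = proc_info_read,
            .llseek = generic_file_llseek,
    };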
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d35b232..aea8502 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -232,9 +232,9 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
if (rv == -ENOIOCTLCMD)
rv = -EINVAL;
} else if (ioctl) {
- lock_kernel();
+ WARN_ONCE(1, "Procfs ioctl handlers must use unlocked_ioctl, "
+ "%pf will be called without the Bkl held\n", ioctl);
rv = ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
- unlock_kernel();
}
pde_users_dec(pde);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 19979a2..c837a77 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -558,6 +558,7 @@ static int open_kcore(struct inode *inode, struct file *filp)
static const struct file_operations proc_kcore_operations = {
.read = read_kcore,
.open = open_kcore,
+ .llseek = generic_file_llseek,
};
#ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index cfe90a4..bd4b5a7 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -53,6 +53,7 @@ static const struct file_operations proc_kmsg_operations = {
.poll = kmsg_poll,
.open = kmsg_open,
.release = kmsg_release,
+ .llseek = generic_file_llseek,
};
static int __init proc_kmsg_init(void)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0705534..47f5b14 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -247,25 +247,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
} else if (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack) {
name = "[stack]";
- } else {
- unsigned long stack_start;
- struct proc_maps_private *pmp;
-
- pmp = m->private;
- stack_start = pmp->task->stack_start;
-
- if (vma->vm_start <= stack_start &&
- vma->vm_end >= stack_start) {
- pad_len_spaces(m, len);
- seq_printf(m,
- "[threadstack:%08lx]",
-#ifdef CONFIG_STACK_GROWSUP
- vma->vm_end - stack_start
-#else
- stack_start - vma->vm_start
-#endif
- );
- }
}
} else {
name = "[vdso]";
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 9fbc99e..91c817f 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -163,6 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
static const struct file_operations proc_vmcore_operations = {
.read = read_vmcore,
+ .llseek = generic_file_llseek,
};
static struct vmcore* __init get_new_element(void)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 788b580..655a4c5 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -82,7 +82,7 @@
/*
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats, dqstats structure containing statistics about the lists
+ * and quota formats.
* dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
* also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
* i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
@@ -132,7 +132,9 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
+#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
+#endif
static struct quota_format_type *quota_formats; /* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
@@ -226,6 +228,10 @@ static struct hlist_head *dquot_hash;
struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
+#ifdef CONFIG_SMP
+struct dqstats *dqstats_pcpu;
+EXPORT_SYMBOL(dqstats_pcpu);
+#endif
static qsize_t inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);
@@ -273,7 +279,7 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
static inline void put_dquot_last(struct dquot *dquot)
{
list_add_tail(&dquot->dq_free, &free_dquots);
- dqstats.free_dquots++;
+ dqstats_inc(DQST_FREE_DQUOTS);
}
static inline void remove_free_dquot(struct dquot *dquot)
@@ -281,7 +287,7 @@ static inline void remove_free_dquot(struct dquot *dquot)
if (list_empty(&dquot->dq_free))
return;
list_del_init(&dquot->dq_free);
- dqstats.free_dquots--;
+ dqstats_dec(DQST_FREE_DQUOTS);
}
static inline void put_inuse(struct dquot *dquot)
@@ -289,12 +295,12 @@ static inline void put_inuse(struct dquot *dquot)
/* We add to the back of inuse list so we don't have to restart
* when traversing this list and we block */
list_add_tail(&dquot->dq_inuse, &inuse_list);
- dqstats.allocated_dquots++;
+ dqstats_inc(DQST_ALLOC_DQUOTS);
}
static inline void remove_inuse(struct dquot *dquot)
{
- dqstats.allocated_dquots--;
+ dqstats_dec(DQST_ALLOC_DQUOTS);
list_del(&dquot->dq_inuse);
}
/*
@@ -317,14 +323,23 @@ static inline int mark_dquot_dirty(struct dquot *dquot)
return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
+/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
+ int ret = 1;
+
+ /* If quota is dirty already, we don't have to acquire dq_list_lock */
+ if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ return 1;
+
spin_lock(&dq_list_lock);
- if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
+ if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
info[dquot->dq_type].dqi_dirty_list);
+ ret = 0;
+ }
spin_unlock(&dq_list_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
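The rewritten dquot_mark_dquot_dirty() above is a double-checked pattern: an unlocked test_bit() skips dq_list_lock entirely when the dquot is already dirty, and the function now returns the previous dirty state instead of always 0. Its skeleton, not compilable on its own (dirty_list abbreviates the per-type dqi_dirty_list):

    int ret = 1;

    if (test_bit(DQ_MOD_B, &dquot->dq_flags))
            return 1;               /* already dirty, lock avoided */
    spin_lock(&dq_list_lock);
    if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
            list_add(&dquot->dq_dirty, &dirty_list);
            ret = 0;                /* we made the clean -> dirty transition */
    }
    spin_unlock(&dq_list_lock);
    return ret;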
@@ -550,8 +565,8 @@ int dquot_scan_active(struct super_block *sb,
continue;
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
ret = fn(dquot, priv);
@@ -596,8 +611,8 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
* holding reference so we can safely just increase
* use count */
atomic_inc(&dquot->dq_count);
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_LOOKUPS);
sb->dq_op->write_dquot(dquot);
dqput(dquot);
spin_lock(&dq_list_lock);
@@ -609,9 +624,7 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
&& info_dirty(&dqopt->info[cnt]))
sb->dq_op->write_info(sb, cnt);
- spin_lock(&dq_list_lock);
- dqstats.syncs++;
- spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_SYNCS);
mutex_unlock(&dqopt->dqonoff_mutex);
if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
@@ -663,6 +676,22 @@ static void prune_dqcache(int count)
}
}
+static int dqstats_read(unsigned int type)
+{
+ int count = 0;
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_possible_cpu(cpu)
+ count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type];
+ /* Statistics reading is racy, but absolute accuracy isn't required */
+ if (count < 0)
+ count = 0;
+#else
+ count = dqstats.stat[type];
+#endif
+ return count;
+}
+
/*
* This is called from kswapd when we think we need some
* more memory
@@ -675,7 +704,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
prune_dqcache(nr);
spin_unlock(&dq_list_lock);
}
- return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
+ return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure;
}
static struct shrinker dqcache_shrinker = {
@@ -703,10 +732,7 @@ void dqput(struct dquot *dquot)
BUG();
}
#endif
-
- spin_lock(&dq_list_lock);
- dqstats.drops++;
- spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_DROPS);
we_slept:
spin_lock(&dq_list_lock);
if (atomic_read(&dquot->dq_count) > 1) {
@@ -823,15 +849,15 @@ we_slept:
put_inuse(dquot);
/* hash it first so it can be found */
insert_dquot_hash(dquot);
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_LOOKUPS);
} else {
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
atomic_inc(&dquot->dq_count);
- dqstats.cache_hits++;
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_CACHE_HITS);
+ dqstats_inc(DQST_LOOKUPS);
}
/* Wait for dq_lock - after this we know that either dquot_release() is
* already finished or it will be canceled due to dq_count > 1 test */
@@ -1677,16 +1703,19 @@ EXPORT_SYMBOL(dquot_free_inode);
/*
* Transfer the number of inode and blocks from one diskquota to an other.
+ * On success, dquot references in transfer_to are consumed and references
+ * to original dquots that need to be released are placed there. On failure,
+ * references are kept untouched.
*
* This operation can block, but only after everything is updated.
* A transaction must be started when entering this function.
+ *
*/
-static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask)
+int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
qsize_t space, cur_space;
qsize_t rsv_space = 0;
- struct dquot *transfer_from[MAXQUOTAS];
- struct dquot *transfer_to[MAXQUOTAS];
+ struct dquot *transfer_from[MAXQUOTAS] = {};
int cnt, ret = 0;
char warntype_to[MAXQUOTAS];
char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
@@ -1696,19 +1725,12 @@ static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask
if (IS_NOQUOTA(inode))
return 0;
/* Initialize the arrays */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- transfer_from[cnt] = NULL;
- transfer_to[cnt] = NULL;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype_to[cnt] = QUOTA_NL_NOWARN;
- }
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (mask & (1 << cnt))
- transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt);
- }
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- goto put_all;
+ return 0;
}
spin_lock(&dq_data_lock);
cur_space = inode_get_bytes(inode);
@@ -1760,47 +1782,41 @@ static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask
mark_all_dquot_dirty(transfer_from);
mark_all_dquot_dirty(transfer_to);
- /* The reference we got is transferred to the inode */
+ /* Pass back references to put */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- transfer_to[cnt] = NULL;
-warn_put_all:
+ transfer_to[cnt] = transfer_from[cnt];
+warn:
flush_warnings(transfer_to, warntype_to);
flush_warnings(transfer_from, warntype_from_inodes);
flush_warnings(transfer_from, warntype_from_space);
-put_all:
- dqput_all(transfer_from);
- dqput_all(transfer_to);
return ret;
over_quota:
spin_unlock(&dq_data_lock);
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- /* Clear dquot pointers we don't want to dqput() */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- transfer_from[cnt] = NULL;
- goto warn_put_all;
+ goto warn;
}
+EXPORT_SYMBOL(__dquot_transfer);
/* Wrapper for transferring ownership of an inode for uid/gid only
* Called from FSXXX_setattr()
*/
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
- qid_t chid[MAXQUOTAS];
- unsigned long mask = 0;
+ struct dquot *transfer_to[MAXQUOTAS] = {};
+ struct super_block *sb = inode->i_sb;
+ int ret;
- if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) {
- mask |= 1 << USRQUOTA;
- chid[USRQUOTA] = iattr->ia_uid;
- }
- if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) {
- mask |= 1 << GRPQUOTA;
- chid[GRPQUOTA] = iattr->ia_gid;
- }
- if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
- dquot_initialize(inode);
- return __dquot_transfer(inode, chid, mask);
- }
- return 0;
+ if (!sb_any_quota_active(sb) || IS_NOQUOTA(inode))
+ return 0;
+
+ if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
+ transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
+ if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
+ transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);
+
+ ret = __dquot_transfer(inode, transfer_to);
+ dqput_all(transfer_to);
+ return ret;
}
EXPORT_SYMBOL(dquot_transfer);
@@ -2275,25 +2291,30 @@ static inline qsize_t stoqb(qsize_t space)
}
/* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
+ memset(di, 0, sizeof(*di));
+ di->d_version = FS_DQUOT_VERSION;
+ di->d_flags = dquot->dq_type == USRQUOTA ?
+ XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+ di->d_id = dquot->dq_id;
+
spin_lock(&dq_data_lock);
- di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
- di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
- di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
- di->dqb_ihardlimit = dm->dqb_ihardlimit;
- di->dqb_isoftlimit = dm->dqb_isoftlimit;
- di->dqb_curinodes = dm->dqb_curinodes;
- di->dqb_btime = dm->dqb_btime;
- di->dqb_itime = dm->dqb_itime;
- di->dqb_valid = QIF_ALL;
+ di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
+ di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+ di->d_ino_hardlimit = dm->dqb_ihardlimit;
+ di->d_ino_softlimit = dm->dqb_isoftlimit;
+ di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
+ di->d_icount = dm->dqb_curinodes;
+ di->d_btimer = dm->dqb_btime;
+ di->d_itimer = dm->dqb_itime;
spin_unlock(&dq_data_lock);
}
int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
- struct if_dqblk *di)
+ struct fs_disk_quota *di)
{
struct dquot *dquot;
@@ -2307,51 +2328,70 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
}
EXPORT_SYMBOL(vfs_get_dqblk);
+#define VFS_FS_DQ_MASK \
+ (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
+ FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
+ FS_DQ_BTIMER | FS_DQ_ITIMER)
+
/* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
- if ((di->dqb_valid & QIF_BLIMITS &&
- (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
- di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
- (di->dqb_valid & QIF_ILIMITS &&
- (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
- di->dqb_isoftlimit > dqi->dqi_maxilimit)))
+ if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+ return -EINVAL;
+
+ if (((di->d_fieldmask & FS_DQ_BSOFT) &&
+ (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
+ ((di->d_fieldmask & FS_DQ_BHARD) &&
+ (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
+ ((di->d_fieldmask & FS_DQ_ISOFT) &&
+ (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
+ ((di->d_fieldmask & FS_DQ_IHARD) &&
+ (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
return -ERANGE;
spin_lock(&dq_data_lock);
- if (di->dqb_valid & QIF_SPACE) {
- dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
+ if (di->d_fieldmask & FS_DQ_BCOUNT) {
+ dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_BLIMITS) {
- dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
- dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
+
+ if (di->d_fieldmask & FS_DQ_BSOFT)
+ dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
+ if (di->d_fieldmask & FS_DQ_BHARD)
+ dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
+ if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_INODES) {
- dm->dqb_curinodes = di->dqb_curinodes;
+
+ if (di->d_fieldmask & FS_DQ_ICOUNT) {
+ dm->dqb_curinodes = di->d_icount;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_ILIMITS) {
- dm->dqb_isoftlimit = di->dqb_isoftlimit;
- dm->dqb_ihardlimit = di->dqb_ihardlimit;
+
+ if (di->d_fieldmask & FS_DQ_ISOFT)
+ dm->dqb_isoftlimit = di->d_ino_softlimit;
+ if (di->d_fieldmask & FS_DQ_IHARD)
+ dm->dqb_ihardlimit = di->d_ino_hardlimit;
+ if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_BTIME) {
- dm->dqb_btime = di->dqb_btime;
+
+ if (di->d_fieldmask & FS_DQ_BTIMER) {
+ dm->dqb_btime = di->d_btimer;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_ITIME) {
- dm->dqb_itime = di->dqb_itime;
+
+ if (di->d_fieldmask & FS_DQ_ITIMER) {
+ dm->dqb_itime = di->d_itimer;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
}
@@ -2361,7 +2401,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
dm->dqb_curspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
- } else if (!(di->dqb_valid & QIF_BTIME))
+ } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
@@ -2370,7 +2410,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
dm->dqb_curinodes < dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
- } else if (!(di->dqb_valid & QIF_ITIME))
+ } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
@@ -2386,7 +2426,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
}
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
- struct if_dqblk *di)
+ struct fs_disk_quota *di)
{
struct dquot *dquot;
int rc;
@@ -2465,62 +2505,74 @@ const struct quotactl_ops vfs_quotactl_ops = {
.set_dqblk = vfs_set_dqblk
};
+
+static int do_proc_dqstats(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+#ifdef CONFIG_SMP
+ /* Update global table */
+ unsigned int type = (int *)table->data - dqstats.stat;
+ dqstats.stat[type] = dqstats_read(type);
+#endif
+ return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+
static ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
- .data = &dqstats.lookups,
+ .data = &dqstats.stat[DQST_LOOKUPS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
- .data = &dqstats.drops,
+ .data = &dqstats.stat[DQST_DROPS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
- .data = &dqstats.reads,
+ .data = &dqstats.stat[DQST_READS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
- .data = &dqstats.writes,
+ .data = &dqstats.stat[DQST_WRITES],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
- .data = &dqstats.cache_hits,
+ .data = &dqstats.stat[DQST_CACHE_HITS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
- .data = &dqstats.allocated_dquots,
+ .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
- .data = &dqstats.free_dquots,
+ .data = &dqstats.stat[DQST_FREE_DQUOTS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
- .data = &dqstats.syncs,
+ .data = &dqstats.stat[DQST_SYNCS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
#ifdef CONFIG_PRINT_QUOTA_WARNING
{
@@ -2572,6 +2624,13 @@ static int __init dquot_init(void)
if (!dquot_hash)
panic("Cannot create dquot hash table");
+#ifdef CONFIG_SMP
+ dqstats_pcpu = alloc_percpu(struct dqstats);
+ if (!dqstats_pcpu)
+ panic("Cannot create dquot stats table");
+#endif
+ memset(&dqstats, 0, sizeof(struct dqstats));
+
/* Find power-of-two hlist_heads which can fit into allocation */
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
dq_hash_bits = 0;
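A note on the dqstats conversion in the hunks above: the counters move from single shared integers to per-CPU slots. Writers bump only their local slot (dqstats_inc()), and do_proc_dqstats() folds all slots back into the global table before proc_dointvec() formats the value. A minimal userspace sketch of that split, assuming C11 atomics and a fixed CPU count (both stand-ins for the kernel's real per-CPU machinery):

#include <stdatomic.h>

#define NR_CPUS  8	/* stand-in; the kernel sizes this at runtime */
#define NR_STATS 8	/* one slot per DQST_* counter */

static atomic_long pcpu_stat[NR_CPUS][NR_STATS];

/* hot path: touch only the local CPU's slot, no shared cache line */
static void stat_inc(int cpu, int type)
{
	atomic_fetch_add_explicit(&pcpu_stat[cpu][type], 1,
				  memory_order_relaxed);
}

/* slow path (the /proc read): fold every CPU's slot into one total */
static long stat_read(int type)
{
	long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += atomic_load_explicit(&pcpu_stat[cpu][type],
					    memory_order_relaxed);
	return sum;
}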
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95388f9..cfc7882 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -113,8 +113,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
struct if_dqinfo info;
int ret;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->get_info)
return -ENOSYS;
ret = sb->s_qcop->get_info(sb, type, &info);
@@ -129,43 +127,80 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
if (copy_from_user(&info, addr, sizeof(info)))
return -EFAULT;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->set_info)
return -ENOSYS;
return sb->s_qcop->set_info(sb, type, &info);
}
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+{
+ dst->dqb_bhardlimit = src->d_blk_hardlimit;
+ dst->dqb_bsoftlimit = src->d_blk_softlimit;
+ dst->dqb_curspace = src->d_bcount;
+ dst->dqb_ihardlimit = src->d_ino_hardlimit;
+ dst->dqb_isoftlimit = src->d_ino_softlimit;
+ dst->dqb_curinodes = src->d_icount;
+ dst->dqb_btime = src->d_btimer;
+ dst->dqb_itime = src->d_itimer;
+ dst->dqb_valid = QIF_ALL;
+}
+
static int quota_getquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
+ struct fs_disk_quota fdq;
struct if_dqblk idq;
int ret;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
- ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
+ ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
if (ret)
return ret;
+ copy_to_if_dqblk(&idq, &fdq);
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
return 0;
}
+static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+{
+ dst->d_blk_hardlimit = src->dqb_bhardlimit;
+ dst->d_blk_softlimit = src->dqb_bsoftlimit;
+ dst->d_bcount = src->dqb_curspace;
+ dst->d_ino_hardlimit = src->dqb_ihardlimit;
+ dst->d_ino_softlimit = src->dqb_isoftlimit;
+ dst->d_icount = src->dqb_curinodes;
+ dst->d_btimer = src->dqb_btime;
+ dst->d_itimer = src->dqb_itime;
+
+ dst->d_fieldmask = 0;
+ if (src->dqb_valid & QIF_BLIMITS)
+ dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+ if (src->dqb_valid & QIF_SPACE)
+ dst->d_fieldmask |= FS_DQ_BCOUNT;
+ if (src->dqb_valid & QIF_ILIMITS)
+ dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+ if (src->dqb_valid & QIF_INODES)
+ dst->d_fieldmask |= FS_DQ_ICOUNT;
+ if (src->dqb_valid & QIF_BTIME)
+ dst->d_fieldmask |= FS_DQ_BTIMER;
+ if (src->dqb_valid & QIF_ITIME)
+ dst->d_fieldmask |= FS_DQ_ITIMER;
+}
+
static int quota_setquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
+ struct fs_disk_quota fdq;
struct if_dqblk idq;
if (copy_from_user(&idq, addr, sizeof(idq)))
return -EFAULT;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
- return sb->s_qcop->set_dqblk(sb, type, id, &idq);
+ copy_from_if_dqblk(&fdq, &idq);
+ return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
@@ -199,9 +234,9 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
if (copy_from_user(&fdq, addr, sizeof(fdq)))
return -EFAULT;
- if (!sb->s_qcop->set_xquota)
+ if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
- return sb->s_qcop->set_xquota(sb, type, id, &fdq);
+ return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
@@ -210,9 +245,9 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota fdq;
int ret;
- if (!sb->s_qcop->get_xquota)
+ if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
- ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
+ ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
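The copy_from_if_dqblk() helper above is a fixed fan-out from QIF_* validity bits to FS_DQ_* field-mask bits; note that a single QIF bit may imply two FS_DQ bits. The same mapping can be written table-driven. A sketch using stand-in bit values (the real definitions live in the quota headers):

/* illustrative bit values only, not the real header constants */
#define QIF_BLIMITS (1 << 0)
#define QIF_SPACE   (1 << 1)
#define QIF_ILIMITS (1 << 2)
#define QIF_INODES  (1 << 3)
#define QIF_BTIME   (1 << 4)
#define QIF_ITIME   (1 << 5)

#define FS_DQ_ISOFT  (1 << 0)
#define FS_DQ_IHARD  (1 << 1)
#define FS_DQ_BSOFT  (1 << 2)
#define FS_DQ_BHARD  (1 << 3)
#define FS_DQ_BTIMER (1 << 6)
#define FS_DQ_ITIMER (1 << 7)
#define FS_DQ_BCOUNT (1 << 12)
#define FS_DQ_ICOUNT (1 << 13)

static const struct { unsigned qif, fs_dq; } mask_map[] = {
	{ QIF_BLIMITS, FS_DQ_BSOFT | FS_DQ_BHARD },
	{ QIF_SPACE,   FS_DQ_BCOUNT },
	{ QIF_ILIMITS, FS_DQ_ISOFT | FS_DQ_IHARD },
	{ QIF_INODES,  FS_DQ_ICOUNT },
	{ QIF_BTIME,   FS_DQ_BTIMER },
	{ QIF_ITIME,   FS_DQ_ITIMER },
};

static unsigned qif_to_fieldmask(unsigned qif_valid)
{
	unsigned mask = 0;
	unsigned i;

	for (i = 0; i < sizeof(mask_map) / sizeof(mask_map[0]); i++)
		if (qif_valid & mask_map[i].qif)
			mask |= mask_map[i].fs_dq;
	return mask;
}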
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index f81f4bc..24f0340 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -60,9 +60,17 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
+ ssize_t ret;
- return sb->s_op->quota_write(sb, info->dqi_type, buf,
+ ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+ if (ret != info->dqi_usable_bs) {
+ q_warn(KERN_WARNING "VFS: dquota write failed on "
+ "dev %s\n", sb->s_id);
+ if (ret >= 0)
+ ret = -EIO;
+ }
+ return ret;
}
/* Remove empty block from list and return it */
@@ -152,7 +160,7 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
/* No matter whether write succeeds block is out of list */
if (write_blk(info, blk, buf) < 0)
- printk(KERN_ERR
+ q_warn(KERN_ERR
"VFS: Can't write block (%u) with free entries.\n",
blk);
return 0;
@@ -244,7 +252,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
*err = remove_free_dqentry(info, buf, blk);
if (*err < 0) {
- printk(KERN_ERR "VFS: find_free_dqentry(): Can't "
+ q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't "
"remove block (%u) from entry free list.\n",
blk);
goto out_buf;
@@ -268,7 +276,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
#endif
*err = write_blk(info, blk, buf);
if (*err < 0) {
- printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
+ q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
"data block %u.\n", blk);
goto out_buf;
}
@@ -303,7 +311,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
} else {
ret = read_blk(info, *treeblk, buf);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't read tree quota block "
+ q_warn(KERN_ERR "VFS: Can't read tree quota block "
"%u.\n", *treeblk);
goto out_buf;
}
@@ -365,7 +373,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
if (!dquot->dq_off) {
ret = dq_insert_tree(info, dquot);
if (ret < 0) {
- printk(KERN_ERR "VFS: Error %zd occurred while "
+ q_warn(KERN_ERR "VFS: Error %zd occurred while "
"creating quota.\n", ret);
kfree(ddquot);
return ret;
@@ -377,14 +385,14 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
- printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
+ q_warn(KERN_WARNING "VFS: dquota write failed on dev %s\n",
sb->s_id);
if (ret >= 0)
ret = -ENOSPC;
} else {
ret = 0;
}
- dqstats.writes++;
+ dqstats_inc(DQST_WRITES);
kfree(ddquot);
return ret;
@@ -402,14 +410,14 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
if (!buf)
return -ENOMEM;
if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
- printk(KERN_ERR "VFS: Quota structure has offset to other "
+ q_warn(KERN_ERR "VFS: Quota structure has offset to other "
"block (%u) than it should (%u).\n", blk,
(uint)(dquot->dq_off >> info->dqi_blocksize_bits));
goto out_buf;
}
ret = read_blk(info, blk, buf);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
+ q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
goto out_buf;
}
dh = (struct qt_disk_dqdbheader *)buf;
@@ -419,7 +427,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
if (ret >= 0)
ret = put_free_dqblk(info, buf, blk);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't move quota data block (%u) "
+ q_warn(KERN_ERR "VFS: Can't move quota data block (%u) "
"to free list.\n", blk);
goto out_buf;
}
@@ -432,14 +440,14 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
/* Insert will write block itself */
ret = insert_free_dqentry(info, buf, blk);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't insert quota data "
+ q_warn(KERN_ERR "VFS: Can't insert quota data "
"block (%u) to free entry list.\n", blk);
goto out_buf;
}
} else {
ret = write_blk(info, blk, buf);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't write quota data "
+ q_warn(KERN_ERR "VFS: Can't write quota data "
"block %u\n", blk);
goto out_buf;
}
@@ -464,7 +472,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
return -ENOMEM;
ret = read_blk(info, *blk, buf);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
+ q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
goto out_buf;
}
newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -488,7 +496,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
} else {
ret = write_blk(info, *blk, buf);
if (ret < 0)
- printk(KERN_ERR "VFS: Can't write quota tree "
+ q_warn(KERN_ERR "VFS: Can't write quota tree "
"block %u.\n", *blk);
}
}
@@ -521,7 +529,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
return -ENOMEM;
ret = read_blk(info, blk, buf);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+ q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
goto out_buf;
}
ddquot = buf + sizeof(struct qt_disk_dqdbheader);
@@ -531,7 +539,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
ddquot += info->dqi_entry_size;
}
if (i == qtree_dqstr_in_blk(info)) {
- printk(KERN_ERR "VFS: Quota for id %u referenced "
+ q_warn(KERN_ERR "VFS: Quota for id %u referenced "
"but not present.\n", dquot->dq_id);
ret = -EIO;
goto out_buf;
@@ -556,7 +564,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
return -ENOMEM;
ret = read_blk(info, blk, buf);
if (ret < 0) {
- printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+ q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
goto out_buf;
}
ret = 0;
@@ -599,7 +607,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
offset = find_dqentry(info, dquot);
if (offset <= 0) { /* Entry not present? */
if (offset < 0)
- printk(KERN_ERR "VFS: Can't read quota "
+ q_warn(KERN_ERR "VFS: Can't read quota "
"structure for id %u.\n", dquot->dq_id);
dquot->dq_off = 0;
set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -617,7 +625,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
if (ret != info->dqi_entry_size) {
if (ret >= 0)
ret = -EIO;
- printk(KERN_ERR "VFS: Error while reading quota "
+ q_warn(KERN_ERR "VFS: Error while reading quota "
"structure for id %u.\n", dquot->dq_id);
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
@@ -634,7 +642,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
spin_unlock(&dq_data_lock);
kfree(ddquot);
out:
- dqstats.reads++;
+ dqstats_inc(DQST_READS);
return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
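One convention the new write_blk() establishes, and which the q_warn() conversions in this file rely on: a short write reported by ->quota_write() is promoted to a hard error, so callers see either "whole block written" or a negative errno, never a partial byte count to retry. A userspace sketch of the same normalization:

#include <errno.h>
#include <unistd.h>

/* write one block; any short write is normalized to -EIO */
static ssize_t write_all_or_error(int fd, const void *buf, size_t len)
{
	ssize_t ret = write(fd, buf, len);	/* may legally be short */

	if (ret >= 0 && (size_t)ret != len)
		ret = -EIO;	/* caller never sees a partial count */
	return ret;
}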
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h
index a1ab8db..ccc3e71 100644
--- a/fs/quota/quota_tree.h
+++ b/fs/quota/quota_tree.h
@@ -22,4 +22,10 @@ struct qt_disk_dqdbheader {
#define QT_TREEOFF 1 /* Offset of tree in file in blocks */
+#define q_warn(fmt, args...) \
+do { \
+ if (printk_ratelimit()) \
+ printk(fmt, ## args); \
+} while(0)
+
#endif /* _LINUX_QUOTAIO_TREE_H */
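q_warn() exists so that the quota-tree I/O failures converted above cannot flood the log: printk_ratelimit() drops messages once the rate is exceeded. A rough userspace analog of the pattern (the one-message-per-second window is an assumption; the kernel's ratelimit also allows bursts):

#include <stdio.h>
#include <time.h>

static int ratelimit(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (now == last)	/* crude: at most ~1 message per second */
		return 0;
	last = now;
	return 1;
}

#define q_warn(...)				\
do {						\
	if (ratelimit())			\
		fprintf(stderr, __VA_ARGS__);	\
} while (0)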
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 2ae757e..4af344c 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -71,7 +71,7 @@ static int v1_read_dqblk(struct dquot *dquot)
dquot->dq_dqb.dqb_ihardlimit == 0 &&
dquot->dq_dqb.dqb_isoftlimit == 0)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
- dqstats.reads++;
+ dqstats_inc(DQST_READS);
return 0;
}
@@ -104,7 +104,7 @@ static int v1_commit_dqblk(struct dquot *dquot)
ret = 0;
out:
- dqstats.writes++;
+ dqstats_inc(DQST_WRITES);
return ret;
}
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index e3da02f..135206a 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -63,7 +63,7 @@ static int v2_read_header(struct super_block *sb, int type,
size = sb->s_op->quota_read(sb, type, (char *)dqhead,
sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader)) {
- printk(KERN_WARNING "quota_v2: Failed header read:"
+ q_warn(KERN_WARNING "quota_v2: Failed header read:"
" expected=%zd got=%zd\n",
sizeof(struct v2_disk_dqheader), size);
return 0;
@@ -106,7 +106,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
if (size != sizeof(struct v2_disk_dqinfo)) {
- printk(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
+ q_warn(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
sb->s_id);
return -1;
}
@@ -167,7 +167,7 @@ static int v2_write_file_info(struct super_block *sb, int type)
size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
if (size != sizeof(struct v2_disk_dqinfo)) {
- printk(KERN_WARNING "Can't write info structure on device %s.\n",
+ q_warn(KERN_WARNING "Can't write info structure on device %s.\n",
sb->s_id);
return -1;
}
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index c948534..f47cd21 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -214,7 +214,7 @@ static int ramfs_parse_options(char *data, struct ramfs_mount_opts *opts)
return 0;
}
-static int ramfs_fill_super(struct super_block * sb, void * data, int silent)
+int ramfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ramfs_fs_info *fsi;
struct inode *inode = NULL;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index dc2c65e..0f22fda 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3076,9 +3076,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
depth = reiserfs_write_lock_once(inode->i_sb);
- if (attr->ia_valid & ATTR_SIZE) {
+ if (is_quota_modification(inode, attr))
dquot_initialize(inode);
+ if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
*/
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index e9d2935..4e321f7 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -46,9 +46,9 @@ struct bin_buffer {
};
static int
-fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
+fill_read(struct file *file, char *buffer, loff_t off, size_t count)
{
- struct sysfs_dirent *attr_sd = dentry->d_fsdata;
+ struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
@@ -59,7 +59,7 @@ fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
rc = -EIO;
if (attr->read)
- rc = attr->read(kobj, attr, buffer, off, count);
+ rc = attr->read(file, kobj, attr, buffer, off, count);
sysfs_put_active(attr_sd);
@@ -70,8 +70,7 @@ static ssize_t
read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
{
struct bin_buffer *bb = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- int size = dentry->d_inode->i_size;
+ int size = file->f_path.dentry->d_inode->i_size;
loff_t offs = *off;
int count = min_t(size_t, bytes, PAGE_SIZE);
char *temp;
@@ -92,7 +91,7 @@ read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
mutex_lock(&bb->mutex);
- count = fill_read(dentry, bb->buffer, offs, count);
+ count = fill_read(file, bb->buffer, offs, count);
if (count < 0) {
mutex_unlock(&bb->mutex);
goto out_free;
@@ -117,9 +116,9 @@ read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
}
static int
-flush_write(struct dentry *dentry, char *buffer, loff_t offset, size_t count)
+flush_write(struct file *file, char *buffer, loff_t offset, size_t count)
{
- struct sysfs_dirent *attr_sd = dentry->d_fsdata;
+ struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
@@ -130,7 +129,7 @@ flush_write(struct dentry *dentry, char *buffer, loff_t offset, size_t count)
rc = -EIO;
if (attr->write)
- rc = attr->write(kobj, attr, buffer, offset, count);
+ rc = attr->write(file, kobj, attr, buffer, offset, count);
sysfs_put_active(attr_sd);
@@ -141,8 +140,7 @@ static ssize_t write(struct file *file, const char __user *userbuf,
size_t bytes, loff_t *off)
{
struct bin_buffer *bb = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- int size = dentry->d_inode->i_size;
+ int size = file->f_path.dentry->d_inode->i_size;
loff_t offs = *off;
int count = min_t(size_t, bytes, PAGE_SIZE);
char *temp;
@@ -165,7 +163,7 @@ static ssize_t write(struct file *file, const char __user *userbuf,
memcpy(bb->buffer, temp, count);
- count = flush_write(dentry, bb->buffer, offs, count);
+ count = flush_write(file, bb->buffer, offs, count);
mutex_unlock(&bb->mutex);
if (count > 0)
@@ -363,7 +361,7 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
if (!attr->mmap)
goto out_put;
- rc = attr->mmap(kobj, attr, vma);
+ rc = attr->mmap(file, kobj, attr, vma);
if (rc)
goto out_put;
@@ -501,7 +499,7 @@ int sysfs_create_bin_file(struct kobject *kobj,
void sysfs_remove_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
- sysfs_hash_and_remove(kobj->sd, attr->attr.name);
+ sysfs_hash_and_remove(kobj->sd, NULL, attr->attr.name);
}
EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
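For driver authors, the visible effect of the fs/sysfs/bin.c changes is one extra parameter: every bin_attribute callback now receives the opening struct file * first. A hedged sketch of a read-only binary attribute under the new signature (all demo_* names are hypothetical):

static char demo_blob[64];	/* hypothetical data exposed via sysfs */

static ssize_t demo_read(struct file *file, struct kobject *kobj,
			 struct bin_attribute *attr,
			 char *buf, loff_t off, size_t count)
{
	/* @file is the new argument; per-open state can be derived from it */
	if (off >= sizeof(demo_blob))
		return 0;
	if (off + count > sizeof(demo_blob))
		count = sizeof(demo_blob) - off;
	memcpy(buf, demo_blob + off, count);
	return count;
}

static struct bin_attribute demo_attr = {
	.attr = { .name = "demo_blob", .mode = 0444 },
	.size = sizeof(demo_blob),
	.read = demo_read,
};

/* registered as before, e.g. sysfs_create_bin_file(kobj, &demo_attr) */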
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 5907178..7e54bac 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -380,7 +380,7 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
struct sysfs_inode_attrs *ps_iattr;
- if (sysfs_find_dirent(acxt->parent_sd, sd->s_name))
+ if (sysfs_find_dirent(acxt->parent_sd, sd->s_ns, sd->s_name))
return -EEXIST;
sd->s_parent = sysfs_get(acxt->parent_sd);
@@ -533,13 +533,17 @@ void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
* Pointer to sysfs_dirent if found, NULL if not.
*/
struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
+ const void *ns,
const unsigned char *name)
{
struct sysfs_dirent *sd;
- for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling)
+ for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling) {
+ if (ns && sd->s_ns && (sd->s_ns != ns))
+ continue;
if (!strcmp(sd->s_name, name))
return sd;
+ }
return NULL;
}
@@ -558,12 +562,13 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
* Pointer to sysfs_dirent if found, NULL if not.
*/
struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+ const void *ns,
const unsigned char *name)
{
struct sysfs_dirent *sd;
mutex_lock(&sysfs_mutex);
- sd = sysfs_find_dirent(parent_sd, name);
+ sd = sysfs_find_dirent(parent_sd, ns, name);
sysfs_get(sd);
mutex_unlock(&sysfs_mutex);
@@ -572,7 +577,8 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
EXPORT_SYMBOL_GPL(sysfs_get_dirent);
static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
- const char *name, struct sysfs_dirent **p_sd)
+ enum kobj_ns_type type, const void *ns, const char *name,
+ struct sysfs_dirent **p_sd)
{
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
struct sysfs_addrm_cxt acxt;
@@ -583,6 +589,9 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
if (!sd)
return -ENOMEM;
+
+ sd->s_flags |= (type << SYSFS_NS_TYPE_SHIFT);
+ sd->s_ns = ns;
sd->s_dir.kobj = kobj;
/* link in */
@@ -601,7 +610,33 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
int sysfs_create_subdir(struct kobject *kobj, const char *name,
struct sysfs_dirent **p_sd)
{
- return create_dir(kobj, kobj->sd, name, p_sd);
+ return create_dir(kobj, kobj->sd,
+ KOBJ_NS_TYPE_NONE, NULL, name, p_sd);
+}
+
+/**
+ * sysfs_read_ns_type: return associated ns_type
+ * @kobj: the kobject being queried
+ *
+ * Each kobject can be tagged with exactly one namespace type
+ * (i.e. network or user). Return the ns_type associated with
+ * this object, if any.
+ */
+static enum kobj_ns_type sysfs_read_ns_type(struct kobject *kobj)
+{
+ const struct kobj_ns_type_operations *ops;
+ enum kobj_ns_type type;
+
+ ops = kobj_child_ns_ops(kobj);
+ if (!ops)
+ return KOBJ_NS_TYPE_NONE;
+
+ type = ops->type;
+ BUG_ON(type <= KOBJ_NS_TYPE_NONE);
+ BUG_ON(type >= KOBJ_NS_TYPES);
+ BUG_ON(!kobj_ns_type_registered(type));
+
+ return type;
}
/**
@@ -610,7 +645,9 @@ int sysfs_create_subdir(struct kobject *kobj, const char *name,
*/
int sysfs_create_dir(struct kobject * kobj)
{
+ enum kobj_ns_type type;
struct sysfs_dirent *parent_sd, *sd;
+ const void *ns = NULL;
int error = 0;
BUG_ON(!kobj);
@@ -620,7 +657,11 @@ int sysfs_create_dir(struct kobject * kobj)
else
parent_sd = &sysfs_root;
- error = create_dir(kobj, parent_sd, kobject_name(kobj), &sd);
+ if (sysfs_ns_type(parent_sd))
+ ns = kobj->ktype->namespace(kobj);
+ type = sysfs_read_ns_type(kobj);
+
+ error = create_dir(kobj, parent_sd, type, ns, kobject_name(kobj), &sd);
if (!error)
kobj->sd = sd;
return error;
@@ -630,13 +671,19 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct dentry *ret = NULL;
- struct sysfs_dirent *parent_sd = dentry->d_parent->d_fsdata;
+ struct dentry *parent = dentry->d_parent;
+ struct sysfs_dirent *parent_sd = parent->d_fsdata;
struct sysfs_dirent *sd;
struct inode *inode;
+ enum kobj_ns_type type;
+ const void *ns;
mutex_lock(&sysfs_mutex);
- sd = sysfs_find_dirent(parent_sd, dentry->d_name.name);
+ type = sysfs_ns_type(parent_sd);
+ ns = sysfs_info(dir->i_sb)->ns[type];
+
+ sd = sysfs_find_dirent(parent_sd, ns, dentry->d_name.name);
/* no such entry */
if (!sd) {
@@ -735,7 +782,8 @@ void sysfs_remove_dir(struct kobject * kobj)
}
int sysfs_rename(struct sysfs_dirent *sd,
- struct sysfs_dirent *new_parent_sd, const char *new_name)
+ struct sysfs_dirent *new_parent_sd, const void *new_ns,
+ const char *new_name)
{
const char *dup_name = NULL;
int error;
@@ -743,12 +791,12 @@ int sysfs_rename(struct sysfs_dirent *sd,
mutex_lock(&sysfs_mutex);
error = 0;
- if ((sd->s_parent == new_parent_sd) &&
+ if ((sd->s_parent == new_parent_sd) && (sd->s_ns == new_ns) &&
(strcmp(sd->s_name, new_name) == 0))
goto out; /* nothing to rename */
error = -EEXIST;
- if (sysfs_find_dirent(new_parent_sd, new_name))
+ if (sysfs_find_dirent(new_parent_sd, new_ns, new_name))
goto out;
/* rename sysfs_dirent */
@@ -770,6 +818,7 @@ int sysfs_rename(struct sysfs_dirent *sd,
sd->s_parent = new_parent_sd;
sysfs_link_sibling(sd);
}
+ sd->s_ns = new_ns;
error = 0;
out:
@@ -780,19 +829,28 @@ int sysfs_rename(struct sysfs_dirent *sd,
int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
{
- return sysfs_rename(kobj->sd, kobj->sd->s_parent, new_name);
+ struct sysfs_dirent *parent_sd = kobj->sd->s_parent;
+ const void *new_ns = NULL;
+
+ if (sysfs_ns_type(parent_sd))
+ new_ns = kobj->ktype->namespace(kobj);
+
+ return sysfs_rename(kobj->sd, parent_sd, new_ns, new_name);
}
int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj)
{
struct sysfs_dirent *sd = kobj->sd;
struct sysfs_dirent *new_parent_sd;
+ const void *new_ns = NULL;
BUG_ON(!sd->s_parent);
+ if (sysfs_ns_type(sd->s_parent))
+ new_ns = kobj->ktype->namespace(kobj);
new_parent_sd = new_parent_kobj && new_parent_kobj->sd ?
new_parent_kobj->sd : &sysfs_root;
- return sysfs_rename(sd, new_parent_sd, sd->s_name);
+ return sysfs_rename(sd, new_parent_sd, new_ns, sd->s_name);
}
/* Relationship between s_mode and the DT_xxx types */
@@ -807,32 +865,35 @@ static int sysfs_dir_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct sysfs_dirent *sysfs_dir_pos(struct sysfs_dirent *parent_sd,
- ino_t ino, struct sysfs_dirent *pos)
+static struct sysfs_dirent *sysfs_dir_pos(const void *ns,
+ struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos)
{
if (pos) {
int valid = !(pos->s_flags & SYSFS_FLAG_REMOVED) &&
pos->s_parent == parent_sd &&
ino == pos->s_ino;
sysfs_put(pos);
- if (valid)
- return pos;
+ if (!valid)
+ pos = NULL;
}
- pos = NULL;
- if ((ino > 1) && (ino < INT_MAX)) {
+ if (!pos && (ino > 1) && (ino < INT_MAX)) {
pos = parent_sd->s_dir.children;
while (pos && (ino > pos->s_ino))
pos = pos->s_sibling;
}
+ while (pos && pos->s_ns && pos->s_ns != ns)
+ pos = pos->s_sibling;
return pos;
}
-static struct sysfs_dirent *sysfs_dir_next_pos(struct sysfs_dirent *parent_sd,
- ino_t ino, struct sysfs_dirent *pos)
+static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
+ struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos)
{
- pos = sysfs_dir_pos(parent_sd, ino, pos);
+ pos = sysfs_dir_pos(ns, parent_sd, ino, pos);
if (pos)
pos = pos->s_sibling;
+ while (pos && pos->s_ns && pos->s_ns != ns)
+ pos = pos->s_sibling;
return pos;
}
@@ -841,8 +902,13 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
struct dentry *dentry = filp->f_path.dentry;
struct sysfs_dirent * parent_sd = dentry->d_fsdata;
struct sysfs_dirent *pos = filp->private_data;
+ enum kobj_ns_type type;
+ const void *ns;
ino_t ino;
+ type = sysfs_ns_type(parent_sd);
+ ns = sysfs_info(dentry->d_sb)->ns[type];
+
if (filp->f_pos == 0) {
ino = parent_sd->s_ino;
if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
@@ -857,9 +923,9 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
filp->f_pos++;
}
mutex_lock(&sysfs_mutex);
- for (pos = sysfs_dir_pos(parent_sd, filp->f_pos, pos);
+ for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
pos;
- pos = sysfs_dir_next_pos(parent_sd, filp->f_pos, pos)) {
+ pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
const char * name;
unsigned int type;
int len, ret;
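The namespace tag threaded through fs/sysfs/dir.c follows one visibility rule: an untagged entry (s_ns == NULL) is visible everywhere, while a tagged entry is visible only to lookups carrying the matching tag. A self-contained sketch of that rule over a sibling list (types are illustrative):

#include <string.h>

struct dirent_sketch {
	struct dirent_sketch *next;
	const void *ns;		/* NULL = untagged, globally visible */
	const char *name;
};

static struct dirent_sketch *
find_dirent(struct dirent_sketch *head, const void *ns, const char *name)
{
	struct dirent_sketch *sd;

	for (sd = head; sd; sd = sd->next) {
		if (ns && sd->ns && sd->ns != ns)
			continue;	/* tagged for another namespace */
		if (strcmp(sd->name, name) == 0)
			return sd;
	}
	return NULL;
}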
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index e222b25..1beaa73 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -478,9 +478,12 @@ void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
mutex_lock(&sysfs_mutex);
if (sd && dir)
- sd = sysfs_find_dirent(sd, dir);
+ /* Only directories are tagged, so no need to pass
+ * a tag explicitly.
+ */
+ sd = sysfs_find_dirent(sd, NULL, dir);
if (sd && attr)
- sd = sysfs_find_dirent(sd, attr);
+ sd = sysfs_find_dirent(sd, NULL, attr);
if (sd)
sysfs_notify_dirent(sd);
@@ -569,7 +572,7 @@ int sysfs_add_file_to_group(struct kobject *kobj,
int error;
if (group)
- dir_sd = sysfs_get_dirent(kobj->sd, group);
+ dir_sd = sysfs_get_dirent(kobj->sd, NULL, group);
else
dir_sd = sysfs_get(kobj->sd);
@@ -599,7 +602,7 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
mutex_lock(&sysfs_mutex);
rc = -ENOENT;
- sd = sysfs_find_dirent(kobj->sd, attr->name);
+ sd = sysfs_find_dirent(kobj->sd, NULL, attr->name);
if (!sd)
goto out;
@@ -624,7 +627,7 @@ EXPORT_SYMBOL_GPL(sysfs_chmod_file);
void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
{
- sysfs_hash_and_remove(kobj->sd, attr->name);
+ sysfs_hash_and_remove(kobj->sd, NULL, attr->name);
}
void sysfs_remove_files(struct kobject * kobj, const struct attribute **ptr)
@@ -646,11 +649,11 @@ void sysfs_remove_file_from_group(struct kobject *kobj,
struct sysfs_dirent *dir_sd;
if (group)
- dir_sd = sysfs_get_dirent(kobj->sd, group);
+ dir_sd = sysfs_get_dirent(kobj->sd, NULL, group);
else
dir_sd = sysfs_get(kobj->sd);
if (dir_sd) {
- sysfs_hash_and_remove(dir_sd, attr->name);
+ sysfs_hash_and_remove(dir_sd, NULL, attr->name);
sysfs_put(dir_sd);
}
}
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index fe61194..23c1e59 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -23,7 +23,7 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
int i;
for (i = 0, attr = grp->attrs; *attr; i++, attr++)
- sysfs_hash_and_remove(dir_sd, (*attr)->name);
+ sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name);
}
static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
@@ -39,7 +39,7 @@ static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
* visibility. Do this by first removing then
* re-adding (if required) the file */
if (update)
- sysfs_hash_and_remove(dir_sd, (*attr)->name);
+ sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name);
if (grp->is_visible) {
mode = grp->is_visible(kobj, *attr, i);
if (!mode)
@@ -132,7 +132,7 @@ void sysfs_remove_group(struct kobject * kobj,
struct sysfs_dirent *sd;
if (grp->name) {
- sd = sysfs_get_dirent(dir_sd, grp->name);
+ sd = sysfs_get_dirent(dir_sd, NULL, grp->name);
if (!sd) {
WARN(!sd, KERN_WARNING "sysfs group %p not found for "
"kobject '%s'\n", grp, kobject_name(kobj));
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index a4a0a941..bbd77e9 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -324,7 +324,7 @@ void sysfs_delete_inode(struct inode *inode)
sysfs_put(sd);
}
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name)
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const char *name)
{
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
@@ -334,7 +334,9 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name)
sysfs_addrm_start(&acxt, dir_sd);
- sd = sysfs_find_dirent(dir_sd, name);
+ sd = sysfs_find_dirent(dir_sd, ns, name);
+ if (sd && (sd->s_ns != ns))
+ sd = NULL;
if (sd)
sysfs_remove_one(&acxt, sd);
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 7761378..281c0c9 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -35,7 +35,7 @@ static const struct super_operations sysfs_ops = {
struct sysfs_dirent sysfs_root = {
.s_name = "",
.s_count = ATOMIC_INIT(1),
- .s_flags = SYSFS_DIR,
+ .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
.s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
.s_ino = 1,
};
@@ -72,18 +72,107 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
+static int sysfs_test_super(struct super_block *sb, void *data)
+{
+ struct sysfs_super_info *sb_info = sysfs_info(sb);
+ struct sysfs_super_info *info = data;
+ enum kobj_ns_type type;
+ int found = 1;
+
+ for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
+ if (sb_info->ns[type] != info->ns[type])
+ found = 0;
+ }
+ return found;
+}
+
+static int sysfs_set_super(struct super_block *sb, void *data)
+{
+ int error;
+ error = set_anon_super(sb, data);
+ if (!error)
+ sb->s_fs_info = data;
+ return error;
+}
+
static int sysfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, sysfs_fill_super, mnt);
+ struct sysfs_super_info *info;
+ enum kobj_ns_type type;
+ struct super_block *sb;
+ int error;
+
+ error = -ENOMEM;
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto out;
+
+ for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
+ info->ns[type] = kobj_ns_current(type);
+
+ sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
+ if (IS_ERR(sb) || sb->s_fs_info != info)
+ kfree(info);
+ if (IS_ERR(sb)) {
+ error = PTR_ERR(sb);
+ goto out;
+ }
+ if (!sb->s_root) {
+ sb->s_flags = flags;
+ error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ if (error) {
+ deactivate_locked_super(sb);
+ goto out;
+ }
+ sb->s_flags |= MS_ACTIVE;
+ }
+
+ simple_set_mnt(mnt, sb);
+ error = 0;
+out:
+ return error;
+}
+
+static void sysfs_kill_sb(struct super_block *sb)
+{
+ struct sysfs_super_info *info = sysfs_info(sb);
+
+ /* Remove the superblock from fs_supers/s_instances
+ * so we can't find it, before freeing sysfs_super_info.
+ */
+ kill_anon_super(sb);
+ kfree(info);
}
static struct file_system_type sysfs_fs_type = {
.name = "sysfs",
.get_sb = sysfs_get_sb,
- .kill_sb = kill_anon_super,
+ .kill_sb = sysfs_kill_sb,
};
+void sysfs_exit_ns(enum kobj_ns_type type, const void *ns)
+{
+ struct super_block *sb;
+
+ mutex_lock(&sysfs_mutex);
+ spin_lock(&sb_lock);
+ list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) {
+ struct sysfs_super_info *info = sysfs_info(sb);
+ /*
+ * If we see a superblock on the fs_supers/s_instances
+ * list the unmount has not completed and sb->s_fs_info
+ * points to a valid struct sysfs_super_info.
+ */
+ /* Ignore superblocks with the wrong ns */
+ if (info->ns[type] != ns)
+ continue;
+ info->ns[type] = NULL;
+ }
+ spin_unlock(&sb_lock);
+ mutex_unlock(&sysfs_mutex);
+}
+
int __init sysfs_init(void)
{
int err = -ENOMEM;
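The sysfs_get_sb() rewrite above is the standard sget(test, set, data) idiom: reuse an existing superblock whose full set of namespace tags matches, otherwise allocate one and let the "set" callback bind the tags to it. A compact userspace analog of that keyed lookup (types and names are illustrative):

#include <stdlib.h>
#include <string.h>

#define NTAGS 4			/* cf. ns[KOBJ_NS_TYPES] */

struct super_sketch {
	const void *ns[NTAGS];	/* the key, cf. sysfs_super_info */
	struct super_sketch *next;
};

static struct super_sketch *supers;	/* all live instances */

static struct super_sketch *get_super(const void *ns[NTAGS])
{
	struct super_sketch *sb;

	for (sb = supers; sb; sb = sb->next)		/* "test" */
		if (memcmp(sb->ns, ns, sizeof(sb->ns)) == 0)
			return sb;			/* share it */

	sb = calloc(1, sizeof(*sb));
	if (!sb)
		return NULL;
	memcpy(sb->ns, ns, sizeof(sb->ns));		/* "set": bind key */
	sb->next = supers;
	supers = sb;
	return sb;
}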
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index b93ec51..f71246b 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -58,6 +58,8 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
if (!sd)
goto out_put;
+ if (sysfs_ns_type(parent_sd))
+ sd->s_ns = target->ktype->namespace(target);
sd->s_symlink.target_sd = target_sd;
target_sd = NULL; /* reference is now owned by the symlink */
@@ -107,6 +109,26 @@ int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target,
}
/**
+ * sysfs_delete_link - remove symlink in object's directory.
+ * @kobj: object we're acting for.
+ * @targ: object we're pointing to.
+ * @name: name of the symlink to remove.
+ *
+ * Unlike sysfs_remove_link sysfs_delete_link has enough information
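+ * (A tagged directory's children can only be found with the right tag,
+ * so the target's tag must be read back before the lookup.)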
+ * to successfully delete symlinks in tagged directories.
+ */
+void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
+ const char *name)
+{
+ const void *ns = NULL;
+ spin_lock(&sysfs_assoc_lock);
+ if (targ->sd)
+ ns = targ->sd->s_ns;
+ spin_unlock(&sysfs_assoc_lock);
+ sysfs_hash_and_remove(kobj->sd, ns, name);
+}
+
+/**
* sysfs_remove_link - remove symlink in object's directory.
* @kobj: object we're acting for.
* @name: name of the symlink to remove.
@@ -121,7 +143,7 @@ void sysfs_remove_link(struct kobject * kobj, const char * name)
else
parent_sd = kobj->sd;
- sysfs_hash_and_remove(parent_sd, name);
+ sysfs_hash_and_remove(parent_sd, NULL, name);
}
/**
@@ -137,6 +159,7 @@ int sysfs_rename_link(struct kobject *kobj, struct kobject *targ,
const char *old, const char *new)
{
struct sysfs_dirent *parent_sd, *sd = NULL;
+ const void *old_ns = NULL, *new_ns = NULL;
int result;
if (!kobj)
@@ -144,8 +167,11 @@ int sysfs_rename_link(struct kobject *kobj, struct kobject *targ,
else
parent_sd = kobj->sd;
+ if (targ->sd)
+ old_ns = targ->sd->s_ns;
+
result = -ENOENT;
- sd = sysfs_get_dirent(parent_sd, old);
+ sd = sysfs_get_dirent(parent_sd, old_ns, old);
if (!sd)
goto out;
@@ -155,7 +181,10 @@ int sysfs_rename_link(struct kobject *kobj, struct kobject *targ,
if (sd->s_symlink.target_sd->s_dir.kobj != targ)
goto out;
- result = sysfs_rename(sd, parent_sd, new);
+ if (sysfs_ns_type(parent_sd))
+ new_ns = targ->ktype->namespace(targ);
+
+ result = sysfs_rename(sd, parent_sd, new_ns, new);
out:
sysfs_put(sd);
@@ -261,3 +290,4 @@ const struct inode_operations sysfs_symlink_inode_operations = {
EXPORT_SYMBOL_GPL(sysfs_create_link);
EXPORT_SYMBOL_GPL(sysfs_remove_link);
+EXPORT_SYMBOL_GPL(sysfs_rename_link);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 30f5a44..6a13105 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -58,6 +58,7 @@ struct sysfs_dirent {
struct sysfs_dirent *s_sibling;
const char *s_name;
+ const void *s_ns; /* namespace tag */
union {
struct sysfs_elem_dir s_dir;
struct sysfs_elem_symlink s_symlink;
@@ -81,14 +82,27 @@ struct sysfs_dirent {
#define SYSFS_COPY_NAME (SYSFS_DIR | SYSFS_KOBJ_LINK)
#define SYSFS_ACTIVE_REF (SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR)
-#define SYSFS_FLAG_MASK ~SYSFS_TYPE_MASK
-#define SYSFS_FLAG_REMOVED 0x0200
+/* identify any namespace tag on sysfs_dirents */
+#define SYSFS_NS_TYPE_MASK 0xff00
+#define SYSFS_NS_TYPE_SHIFT 8
+
+#define SYSFS_FLAG_MASK ~(SYSFS_NS_TYPE_MASK|SYSFS_TYPE_MASK)
+#define SYSFS_FLAG_REMOVED 0x020000
static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
{
return sd->s_flags & SYSFS_TYPE_MASK;
}
+/*
+ * Return any namespace tags on this dirent.
+ * enum kobj_ns_type is defined in linux/kobject.h
+ */
+static inline enum kobj_ns_type sysfs_ns_type(struct sysfs_dirent *sd)
+{
+ return (sd->s_flags & SYSFS_NS_TYPE_MASK) >> SYSFS_NS_TYPE_SHIFT;
+}
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define sysfs_dirent_init_lockdep(sd) \
do { \
@@ -114,6 +128,16 @@ struct sysfs_addrm_cxt {
/*
* mount.c
*/
+
+/*
+ * Each sb is associated with a set of namespace tags (i.e.
+ * the network namespace of the task which mounted this sysfs
+ * instance).
+ */
+struct sysfs_super_info {
+ const void *ns[KOBJ_NS_TYPES];
+};
+#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
extern struct sysfs_dirent sysfs_root;
extern struct kmem_cache *sysfs_dir_cachep;
@@ -137,8 +161,10 @@ void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
+ const void *ns,
const unsigned char *name);
struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+ const void *ns,
const unsigned char *name);
struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type);
@@ -149,7 +175,7 @@ int sysfs_create_subdir(struct kobject *kobj, const char *name,
void sysfs_remove_subdir(struct sysfs_dirent *sd);
int sysfs_rename(struct sysfs_dirent *sd,
- struct sysfs_dirent *new_parent_sd, const char *new_name);
+ struct sysfs_dirent *new_parent_sd, const void *ns, const char *new_name);
static inline struct sysfs_dirent *__sysfs_get(struct sysfs_dirent *sd)
{
@@ -179,7 +205,7 @@ int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags);
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const char *name);
int sysfs_inode_init(void);
/*
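The SYSFS_NS_TYPE_* additions above pack an 8-bit namespace-type field into bits 8..15 of s_flags, next to the existing type bits, which is why SYSFS_FLAG_REMOVED has to move up to 0x020000. A self-contained sketch of the packing (mask and shift mirror the header, but treat the values as illustrative):

#include <assert.h>

#define NS_TYPE_MASK	0xff00u
#define NS_TYPE_SHIFT	8

static unsigned set_ns_type(unsigned flags, unsigned type)
{
	return (flags & ~NS_TYPE_MASK) | (type << NS_TYPE_SHIFT);
}

static unsigned get_ns_type(unsigned flags)
{
	return (flags & NS_TYPE_MASK) >> NS_TYPE_SHIFT;
}

int main(void)
{
	unsigned flags = set_ns_type(0x0001 /* a type bit */, 2);

	assert(get_ns_type(flags) == 2);	/* type round-trips */
	assert(flags & 0x0001);			/* low bits untouched */
	return 0;
}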
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 4e50286..1dabed2 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -164,8 +164,8 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
name, de->name))
goto found;
}
+ dir_put_page(page);
}
- dir_put_page(page);
if (++n >= npages)
n = 0;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 98158de..b86ab8e 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -110,31 +110,14 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
struct timerfd_ctx *ctx = file->private_data;
ssize_t res;
u64 ticks = 0;
- DECLARE_WAITQUEUE(wait, current);
if (count < sizeof(ticks))
return -EINVAL;
spin_lock_irq(&ctx->wqh.lock);
- res = -EAGAIN;
- if (!ctx->ticks && !(file->f_flags & O_NONBLOCK)) {
- __add_wait_queue(&ctx->wqh, &wait);
- for (res = 0;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (ctx->ticks) {
- res = 0;
- break;
- }
- if (signal_pending(current)) {
- res = -ERESTARTSYS;
- break;
- }
- spin_unlock_irq(&ctx->wqh.lock);
- schedule();
- spin_lock_irq(&ctx->wqh.lock);
- }
- __remove_wait_queue(&ctx->wqh, &wait);
- __set_current_state(TASK_RUNNING);
- }
+ if (file->f_flags & O_NONBLOCK)
+ res = -EAGAIN;
+ else
+ res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks);
if (ctx->ticks) {
ticks = ctx->ticks;
if (ctx->expired && ctx->tintv.tv64) {
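The timerfd_read() simplification swaps an open-coded wait-queue loop for wait_event_interruptible_locked_irq(), which sleeps until the condition is true while atomically releasing ctx->wqh.lock around each sleep, exactly what the removed loop did by hand. A pthreads analog of the same pattern, leaving out the O_NONBLOCK and signal (-ERESTARTSYS) cases:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static unsigned long long ticks;

static unsigned long long read_ticks(void)
{
	unsigned long long t;

	pthread_mutex_lock(&lock);
	while (ticks == 0)			 /* recheck on each wakeup */
		pthread_cond_wait(&cond, &lock); /* lock dropped while asleep */
	t = ticks;
	ticks = 0;
	pthread_mutex_unlock(&lock);
	return t;
}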
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 77d5cf4..bcf5a16 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -64,6 +64,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
if (!c->ro_media) {
c->ro_media = 1;
c->no_chk_data_crc = 0;
+ c->vfs_sb->s_flags |= MS_RDONLY;
ubifs_warn("switched to read-only mode, error %d", err);
dbg_dump_stack();
}
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index f0f2a43..3a84455 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -209,6 +209,6 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
const struct file_operations udf_dir_operations = {
.read = generic_read_dir,
.readdir = udf_readdir,
- .ioctl = udf_ioctl,
+ .unlocked_ioctl = udf_ioctl,
.fsync = simple_fsync,
};
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 4b6a46c..baae3a7 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -37,6 +37,7 @@
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/aio.h>
+#include <linux/smp_lock.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -144,50 +145,60 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return retval;
}
-int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = filp->f_dentry->d_inode;
long old_block, new_block;
int result = -EINVAL;
+ lock_kernel();
+
if (file_permission(filp, MAY_READ) != 0) {
- udf_debug("no permission to access inode %lu\n",
- inode->i_ino);
- return -EPERM;
+ udf_debug("no permission to access inode %lu\n", inode->i_ino);
+ result = -EPERM;
+ goto out;
}
if (!arg) {
udf_debug("invalid argument to udf_ioctl\n");
- return -EINVAL;
+ result = -EINVAL;
+ goto out;
}
switch (cmd) {
case UDF_GETVOLIDENT:
if (copy_to_user((char __user *)arg,
UDF_SB(inode->i_sb)->s_volume_ident, 32))
- return -EFAULT;
+ result = -EFAULT;
else
- return 0;
+ result = 0;
+ goto out;
case UDF_RELOCATE_BLOCKS:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (get_user(old_block, (long __user *)arg))
- return -EFAULT;
+ if (!capable(CAP_SYS_ADMIN)) {
+ result = -EACCES;
+ goto out;
+ }
+ if (get_user(old_block, (long __user *)arg)) {
+ result = -EFAULT;
+ goto out;
+ }
result = udf_relocate_blocks(inode->i_sb,
old_block, &new_block);
if (result == 0)
result = put_user(new_block, (long __user *)arg);
- return result;
+ goto out;
case UDF_GETEASIZE:
result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
- break;
+ goto out;
case UDF_GETEABLOCK:
result = copy_to_user((char __user *)arg,
UDF_I(inode)->i_ext.i_data,
UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
- break;
+ goto out;
}
+out:
+ unlock_kernel();
return result;
}
@@ -207,7 +218,7 @@ static int udf_release_file(struct inode *inode, struct file *filp)
const struct file_operations udf_file_operations = {
.read = do_sync_read,
.aio_read = generic_file_aio_read,
- .ioctl = udf_ioctl,
+ .unlocked_ioctl = udf_ioctl,
.open = dquot_file_open,
.mmap = generic_file_mmap,
.write = do_sync_write,
@@ -227,7 +238,7 @@ int udf_setattr(struct dentry *dentry, struct iattr *iattr)
if (error)
return error;
- if (iattr->ia_valid & ATTR_SIZE)
+ if (is_quota_modification(inode, iattr))
dquot_initialize(inode);
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
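The udf_ioctl() conversion above is the standard recipe for moving from .ioctl to .unlocked_ioctl in this era: recover the inode from the file pointer, take the BKL explicitly (the VFS no longer wraps the call), and funnel every return through one unlock path. A skeleton of that shape, with DEMO_CMD and do_demo() as made-up placeholders:

static long demo_unlocked_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	long result = -ENOTTY;

	lock_kernel();		/* the old .ioctl entry got this for free */
	switch (cmd) {
	case DEMO_CMD:		/* hypothetical command number */
		result = do_demo(inode, arg);
		break;
	}
	unlock_kernel();	/* single exit: the BKL is always dropped */
	return result;
}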
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 702a114..9079ff7 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -130,8 +130,7 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
uint8_t *, uint8_t *);
/* file.c */
-extern int udf_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+extern long udf_ioctl(struct file *, unsigned int, unsigned long);
extern int udf_setattr(struct dentry *dentry, struct iattr *iattr);
/* inode.c */
extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 80b68c3..cffa756 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -603,7 +603,7 @@ static void ufs_set_inode_ops(struct inode *inode)
if (!inode->i_blocks)
inode->i_op = &ufs_fast_symlink_inode_operations;
else {
- inode->i_op = &page_symlink_inode_operations;
+ inode->i_op = &ufs_symlink_inode_operations;
inode->i_mapping->a_ops = &ufs_aops;
}
} else
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 1185562..eabc02e 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -148,7 +148,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
/* slow symlink */
- inode->i_op = &page_symlink_inode_operations;
+ inode->i_op = &ufs_symlink_inode_operations;
inode->i_mapping->a_ops = &ufs_aops;
err = page_symlink(inode, symname, l);
if (err)
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
index c0156ed..d283628 100644
--- a/fs/ufs/symlink.c
+++ b/fs/ufs/symlink.c
@@ -42,4 +42,12 @@ static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
const struct inode_operations ufs_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ufs_follow_link,
+ .setattr = ufs_setattr,
+};
+
+const struct inode_operations ufs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+ .setattr = ufs_setattr,
};
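Background for the new ufs_symlink_inode_operations table: UFS stores short symlink targets inside the on-disk inode ("fast" symlinks, no data blocks) and long targets in the page cache, and after this patch each flavour has its own inode_operations so that ufs_setattr() is wired up for both. A condensed sketch of the selection, based on the namei.c and inode.c hunks above (the fast-link branch is paraphrased):

/* cf. ufs_symlink() in fs/ufs/namei.c */
if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
	/* slow symlink: target goes through the page cache */
	inode->i_op = &ufs_symlink_inode_operations;
	inode->i_mapping->a_ops = &ufs_aops;
} else {
	/* fast symlink: target lives in the inode body */
	inode->i_op = &ufs_fast_symlink_inode_operations;
}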
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index d3b6270..f294c44 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -508,7 +508,7 @@ out:
* - there is no way to know old size
 * - there is no way to inform the user about an error, if it happens in `truncate'
*/
-static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
unsigned int ia_valid = attr->ia_valid;
@@ -518,18 +518,18 @@ static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
+ if (is_quota_modification(inode, attr))
+ dquot_initialize(inode);
+
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
error = dquot_transfer(inode, attr);
if (error)
return error;
}
- if (ia_valid & ATTR_SIZE &&
- attr->ia_size != i_size_read(inode)) {
+ if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
loff_t old_i_size = inode->i_size;
- dquot_initialize(inode);
-
error = vmtruncate(inode, attr->ia_size);
if (error)
return error;
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 43f9f5d..179ae6b 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -122,9 +122,11 @@ extern void ufs_panic (struct super_block *, const char *, const char *, ...) __
/* symlink.c */
extern const struct inode_operations ufs_fast_symlink_inode_operations;
+extern const struct inode_operations ufs_symlink_inode_operations;
/* truncate.c */
extern int ufs_truncate (struct inode *, loff_t);
+extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
{
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 0f8b996..089eaca 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -45,6 +45,15 @@
#include <linux/pagevec.h>
#include <linux/writeback.h>
+/*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+ IO_READ, /* mapping for a read */
+ IO_DELAY, /* mapping covers delalloc region */
+ IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
+ IO_NEW /* just allocated */
+};
/*
* Prime number of hash buckets since address is used as the key.
@@ -103,8 +112,9 @@ xfs_count_page_state(
STATIC struct block_device *
xfs_find_bdev_for_inode(
- struct xfs_inode *ip)
+ struct inode *inode)
{
+ struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
if (XFS_IS_REALTIME_INODE(ip))
@@ -183,7 +193,7 @@ xfs_setfilesize(
xfs_fsize_t isize;
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
- ASSERT(ioend->io_type != IOMAP_READ);
+ ASSERT(ioend->io_type != IO_READ);
if (unlikely(ioend->io_error))
return 0;
@@ -214,7 +224,7 @@ xfs_finish_ioend(
if (atomic_dec_and_test(&ioend->io_remaining)) {
struct workqueue_struct *wq;
- wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+ wq = (ioend->io_type == IO_UNWRITTEN) ?
xfsconvertd_workqueue : xfsdatad_workqueue;
queue_work(wq, &ioend->io_work);
if (wait)
@@ -237,7 +247,7 @@ xfs_end_io(
* For unwritten extents we need to issue transactions to convert a
 * range to normal written extents after the data I/O has finished.
*/
- if (ioend->io_type == IOMAP_UNWRITTEN &&
+ if (ioend->io_type == IO_UNWRITTEN &&
likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
@@ -250,7 +260,7 @@ xfs_end_io(
* We might have to update the on-disk file size after extending
* writes.
*/
- if (ioend->io_type != IOMAP_READ) {
+ if (ioend->io_type != IO_READ) {
error = xfs_setfilesize(ioend);
ASSERT(!error || error == EAGAIN);
}
@@ -309,21 +319,25 @@ xfs_map_blocks(
struct inode *inode,
loff_t offset,
ssize_t count,
- xfs_iomap_t *mapp,
+ struct xfs_bmbt_irec *imap,
int flags)
{
int nmaps = 1;
+ int new = 0;
- return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
+ return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
}
STATIC int
-xfs_iomap_valid(
- xfs_iomap_t *iomapp,
- loff_t offset)
+xfs_imap_valid(
+ struct inode *inode,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
{
- return offset >= iomapp->iomap_offset &&
- offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+ offset >>= inode->i_blkbits;
+
+ return offset >= imap->br_startoff &&
+ offset < imap->br_startoff + imap->br_blockcount;
}
/*
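The old byte-based iomap check becomes block-based here: xfs_imap_valid() shifts the byte offset down to a filesystem-block index before comparing it against the extent's [br_startoff, br_startoff + br_blockcount) range. A self-contained sketch of the test:

#include <stdbool.h>
#include <stdint.h>

struct extent {
	uint64_t startoff;	/* first block covered, in fs blocks */
	uint64_t blockcount;	/* length, in fs blocks */
};

static bool imap_valid(const struct extent *e, uint64_t byte_off,
		       unsigned blkbits)
{
	uint64_t fsb = byte_off >> blkbits;	/* bytes -> fs blocks */

	return fsb >= e->startoff && fsb < e->startoff + e->blockcount;
}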
@@ -554,19 +568,23 @@ xfs_add_to_ioend(
STATIC void
xfs_map_buffer(
+ struct inode *inode,
struct buffer_head *bh,
- xfs_iomap_t *mp,
- xfs_off_t offset,
- uint block_bits)
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
{
sector_t bn;
+ struct xfs_mount *m = XFS_I(inode)->i_mount;
+ xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
+ xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
- ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
- bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
- ((offset - mp->iomap_offset) >> block_bits);
+ bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
+ ((offset - iomap_offset) >> inode->i_blkbits);
- ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
+ ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
bh->b_blocknr = bn;
set_buffer_mapped(bh);
@@ -574,17 +592,17 @@ xfs_map_buffer(
STATIC void
xfs_map_at_offset(
+ struct inode *inode,
struct buffer_head *bh,
- loff_t offset,
- int block_bits,
- xfs_iomap_t *iomapp)
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
{
- ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
lock_buffer(bh);
- xfs_map_buffer(bh, iomapp, offset, block_bits);
- bh->b_bdev = iomapp->iomap_target->bt_bdev;
+ xfs_map_buffer(inode, bh, imap, offset);
+ bh->b_bdev = xfs_find_bdev_for_inode(inode);
set_buffer_mapped(bh);
clear_buffer_delay(bh);
clear_buffer_unwritten(bh);
@@ -713,11 +731,11 @@ xfs_is_delayed_page(
bh = head = page_buffers(page);
do {
if (buffer_unwritten(bh))
- acceptable = (type == IOMAP_UNWRITTEN);
+ acceptable = (type == IO_UNWRITTEN);
else if (buffer_delay(bh))
- acceptable = (type == IOMAP_DELAY);
+ acceptable = (type == IO_DELAY);
else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == IOMAP_NEW);
+ acceptable = (type == IO_NEW);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -740,7 +758,7 @@ xfs_convert_page(
struct inode *inode,
struct page *page,
loff_t tindex,
- xfs_iomap_t *mp,
+ struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
int startio,
@@ -750,7 +768,6 @@ xfs_convert_page(
xfs_off_t end_offset;
unsigned long p_offset;
unsigned int type;
- int bbits = inode->i_blkbits;
int len, page_dirty;
int count = 0, done = 0, uptodate = 1;
xfs_off_t offset = page_offset(page);
@@ -802,19 +819,19 @@ xfs_convert_page(
if (buffer_unwritten(bh) || buffer_delay(bh)) {
if (buffer_unwritten(bh))
- type = IOMAP_UNWRITTEN;
+ type = IO_UNWRITTEN;
else
- type = IOMAP_DELAY;
+ type = IO_DELAY;
- if (!xfs_iomap_valid(mp, offset)) {
+ if (!xfs_imap_valid(inode, imap, offset)) {
done = 1;
continue;
}
- ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
- xfs_map_at_offset(bh, offset, bbits, mp);
+ xfs_map_at_offset(inode, bh, imap, offset);
if (startio) {
xfs_add_to_ioend(inode, bh, offset,
type, ioendp, done);
@@ -826,7 +843,7 @@ xfs_convert_page(
page_dirty--;
count++;
} else {
- type = IOMAP_NEW;
+ type = IO_NEW;
if (buffer_mapped(bh) && all_bh && startio) {
lock_buffer(bh);
xfs_add_to_ioend(inode, bh, offset,
@@ -866,7 +883,7 @@ STATIC void
xfs_cluster_write(
struct inode *inode,
pgoff_t tindex,
- xfs_iomap_t *iomapp,
+ struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
int startio,
@@ -885,7 +902,7 @@ xfs_cluster_write(
for (i = 0; i < pagevec_count(&pvec); i++) {
done = xfs_convert_page(inode, pvec.pages[i], tindex++,
- iomapp, ioendp, wbc, startio, all_bh);
+ imap, ioendp, wbc, startio, all_bh);
if (done)
break;
}
@@ -930,7 +947,7 @@ xfs_aops_discard_page(
loff_t offset = page_offset(page);
ssize_t len = 1 << inode->i_blkbits;
- if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+ if (!xfs_is_delayed_page(page, IO_DELAY))
goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1042,15 +1059,15 @@ xfs_page_state_convert(
int unmapped) /* also implies page uptodate */
{
struct buffer_head *bh, *head;
- xfs_iomap_t iomap;
+ struct xfs_bmbt_irec imap;
xfs_ioend_t *ioend = NULL, *iohead = NULL;
loff_t offset;
unsigned long p_offset = 0;
unsigned int type;
__uint64_t end_offset;
- pgoff_t end_index, last_index, tlast;
+ pgoff_t end_index, last_index;
ssize_t size, len;
- int flags, err, iomap_valid = 0, uptodate = 1;
+ int flags, err, imap_valid = 0, uptodate = 1;
int page_dirty, count = 0;
int trylock = 0;
int all_bh = unmapped;
@@ -1097,7 +1114,7 @@ xfs_page_state_convert(
bh = head = page_buffers(page);
offset = page_offset(page);
flags = BMAPI_READ;
- type = IOMAP_NEW;
+ type = IO_NEW;
/* TODO: cleanup count and page_dirty */
@@ -1111,12 +1128,12 @@ xfs_page_state_convert(
* the iomap is actually still valid, but the ioend
* isn't. shouldn't happen too often.
*/
- iomap_valid = 0;
+ imap_valid = 0;
continue;
}
- if (iomap_valid)
- iomap_valid = xfs_iomap_valid(&iomap, offset);
+ if (imap_valid)
+ imap_valid = xfs_imap_valid(inode, &imap, offset);
/*
* First case, map an unwritten extent and prepare for
@@ -1137,20 +1154,20 @@ xfs_page_state_convert(
* Make sure we don't use a read-only iomap
*/
if (flags == BMAPI_READ)
- iomap_valid = 0;
+ imap_valid = 0;
if (buffer_unwritten(bh)) {
- type = IOMAP_UNWRITTEN;
+ type = IO_UNWRITTEN;
flags = BMAPI_WRITE | BMAPI_IGNSTATE;
} else if (buffer_delay(bh)) {
- type = IOMAP_DELAY;
+ type = IO_DELAY;
flags = BMAPI_ALLOCATE | trylock;
} else {
- type = IOMAP_NEW;
+ type = IO_NEW;
flags = BMAPI_WRITE | BMAPI_MMAP;
}
- if (!iomap_valid) {
+ if (!imap_valid) {
/*
* if we didn't have a valid mapping then we
* need to ensure that we put the new mapping
@@ -1160,7 +1177,7 @@ xfs_page_state_convert(
* for unwritten extent conversion.
*/
new_ioend = 1;
- if (type == IOMAP_NEW) {
+ if (type == IO_NEW) {
size = xfs_probe_cluster(inode,
page, bh, head, 0);
} else {
@@ -1168,14 +1185,14 @@ xfs_page_state_convert(
}
err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
+ &imap, flags);
if (err)
goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
+ imap_valid = xfs_imap_valid(inode, &imap,
+ offset);
}
- if (iomap_valid) {
- xfs_map_at_offset(bh, offset,
- inode->i_blkbits, &iomap);
+ if (imap_valid) {
+ xfs_map_at_offset(inode, bh, &imap, offset);
if (startio) {
xfs_add_to_ioend(inode, bh, offset,
type, &ioend,
@@ -1194,40 +1211,41 @@ xfs_page_state_convert(
* That means it must already have extents allocated
* underneath it. Map the extent by reading it.
*/
- if (!iomap_valid || flags != BMAPI_READ) {
+ if (!imap_valid || flags != BMAPI_READ) {
flags = BMAPI_READ;
size = xfs_probe_cluster(inode, page, bh,
head, 1);
err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
+ &imap, flags);
if (err)
goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
+ imap_valid = xfs_imap_valid(inode, &imap,
+ offset);
}
/*
- * We set the type to IOMAP_NEW in case we are doing a
+ * We set the type to IO_NEW in case we are doing a
* small write at EOF that is extending the file but
* without needing an allocation. We need to update the
* file size on I/O completion in this case so it is
* the same case as having just allocated a new extent
* that we are writing into for the first time.
*/
- type = IOMAP_NEW;
+ type = IO_NEW;
if (trylock_buffer(bh)) {
ASSERT(buffer_mapped(bh));
- if (iomap_valid)
+ if (imap_valid)
all_bh = 1;
xfs_add_to_ioend(inode, bh, offset, type,
- &ioend, !iomap_valid);
+ &ioend, !imap_valid);
page_dirty--;
count++;
} else {
- iomap_valid = 0;
+ imap_valid = 0;
}
} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
(unmapped || startio)) {
- iomap_valid = 0;
+ imap_valid = 0;
}
if (!iohead)
@@ -1241,12 +1259,23 @@ xfs_page_state_convert(
if (startio)
xfs_start_page_writeback(page, 1, count);
- if (ioend && iomap_valid) {
- offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
- PAGE_CACHE_SHIFT;
- tlast = min_t(pgoff_t, offset, last_index);
- xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
- wbc, startio, all_bh, tlast);
+ if (ioend && imap_valid) {
+ xfs_off_t end_index;
+
+ end_index = imap.br_startoff + imap.br_blockcount;
+
+ /* to bytes */
+ end_index <<= inode->i_blkbits;
+
+ /* to pages */
+ end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
+
+ /* check against file size */
+ if (end_index > last_index)
+ end_index = last_index;
+
+ xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
+ wbc, startio, all_bh, end_index);
}
if (iohead)
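
The replacement arithmetic computes the same clustering cutoff as the old iomap_offset/iomap_bsize expression, only in BMBT block units. Worked numbers may help (hypothetical extent; assumes 4 KiB blocks and pages, i.e. i_blkbits == PAGE_CACHE_SHIFT == 12):

	/* extent covers blocks [100, 125) */
	xfs_off_t end_index;

	end_index = 100 + 25;		/* br_startoff + br_blockcount = 125 */
	end_index <<= 12;		/* to bytes: 512000 */
	end_index = (end_index - 1) >> 12;	/* last covered page: 124 */
	/* finally clamped to last_index so clustering stops at EOF */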
@@ -1448,10 +1477,11 @@ __xfs_get_blocks(
int direct,
bmapi_flags_t flags)
{
- xfs_iomap_t iomap;
+ struct xfs_bmbt_irec imap;
xfs_off_t offset;
ssize_t size;
- int niomap = 1;
+ int nimap = 1;
+ int new = 0;
int error;
offset = (xfs_off_t)iblock << inode->i_blkbits;
@@ -1462,22 +1492,21 @@ __xfs_get_blocks(
return 0;
error = xfs_iomap(XFS_I(inode), offset, size,
- create ? flags : BMAPI_READ, &iomap, &niomap);
+ create ? flags : BMAPI_READ, &imap, &nimap, &new);
if (error)
return -error;
- if (niomap == 0)
+ if (nimap == 0)
return 0;
- if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
+ if (imap.br_startblock != HOLESTARTBLOCK &&
+ imap.br_startblock != DELAYSTARTBLOCK) {
/*
* For unwritten extents do not report a disk address on
* the read case (treat as if we're reading into a hole).
*/
- if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
- xfs_map_buffer(bh_result, &iomap, offset,
- inode->i_blkbits);
- }
- if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+ if (create || !ISUNWRITTEN(&imap))
+ xfs_map_buffer(inode, bh_result, &imap, offset);
+ if (create && ISUNWRITTEN(&imap)) {
if (direct)
bh_result->b_private = inode;
set_buffer_unwritten(bh_result);
@@ -1488,7 +1517,7 @@ __xfs_get_blocks(
 * If this is a realtime file, data may be on a different device
 * to that pointed to from the buffer_head b_bdev currently.
*/
- bh_result->b_bdev = iomap.iomap_target->bt_bdev;
+ bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
/*
* If we previously allocated a block out beyond eof and we are now
@@ -1502,10 +1531,10 @@ __xfs_get_blocks(
if (create &&
((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
(offset >= i_size_read(inode)) ||
- (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
+ (new || ISUNWRITTEN(&imap))))
set_buffer_new(bh_result);
- if (iomap.iomap_flags & IOMAP_DELAY) {
+ if (imap.br_startblock == DELAYSTARTBLOCK) {
BUG_ON(direct);
if (create) {
set_buffer_uptodate(bh_result);
@@ -1514,11 +1543,23 @@ __xfs_get_blocks(
}
}
+ /*
+	 * If this is O_DIRECT or the mpage code calling, tell them how large
+	 * the mapping is so that we can avoid repeated get_blocks calls.
+ */
if (direct || size > (1 << inode->i_blkbits)) {
- ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
- offset = min_t(xfs_off_t,
- iomap.iomap_bsize - iomap.iomap_delta, size);
- bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
+ xfs_off_t mapping_size;
+
+ mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
+ mapping_size <<= inode->i_blkbits;
+
+ ASSERT(mapping_size > 0);
+ if (mapping_size > size)
+ mapping_size = size;
+ if (mapping_size > LONG_MAX)
+ mapping_size = LONG_MAX;
+
+ bh_result->b_size = mapping_size;
}
return 0;
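
The b_size computation above is two clamps on top of a unit conversion; restated as a hedged helper (demo name, same field types as the hunk):

	static ssize_t
	demo_mapping_size(struct xfs_bmbt_irec *imap, sector_t iblock,
			  unsigned int blkbits, ssize_t size)
	{
		/* blocks remaining in the mapping from iblock, in bytes */
		xfs_off_t mapping_size =
			(imap->br_startoff + imap->br_blockcount - iblock)
				<< blkbits;

		if (mapping_size > size)	/* no more than was asked for */
			mapping_size = size;
		if (mapping_size > LONG_MAX)	/* b_size holds a long-sized value */
			mapping_size = LONG_MAX;
		return mapping_size;
	}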
@@ -1576,7 +1617,7 @@ xfs_end_io_direct(
*/
ioend->io_offset = offset;
ioend->io_size = size;
- if (ioend->io_type == IOMAP_READ) {
+ if (ioend->io_type == IO_READ) {
xfs_finish_ioend(ioend, 0);
} else if (private && size > 0) {
xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
@@ -1587,7 +1628,7 @@ xfs_end_io_direct(
* didn't map an unwritten extent so switch it's completion
* handler.
*/
- ioend->io_type = IOMAP_NEW;
+ ioend->io_type = IO_NEW;
xfs_finish_ioend(ioend, 0);
}
@@ -1612,10 +1653,10 @@ xfs_vm_direct_IO(
struct block_device *bdev;
ssize_t ret;
- bdev = xfs_find_bdev_for_inode(XFS_I(inode));
+ bdev = xfs_find_bdev_for_inode(inode);
iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
- IOMAP_UNWRITTEN : IOMAP_READ);
+ IO_UNWRITTEN : IO_READ);
ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
offset, nr_segs,
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 44c2b0e..f01de3c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1007,25 +1007,20 @@ xfs_bwrite(
struct xfs_mount *mp,
struct xfs_buf *bp)
{
- int iowait = (bp->b_flags & XBF_ASYNC) == 0;
- int error = 0;
+ int error;
bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
bp->b_flags |= XBF_WRITE;
- if (!iowait)
- bp->b_flags |= _XBF_RUN_QUEUES;
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
xfs_buf_delwri_dequeue(bp);
xfs_buf_iostrategy(bp);
- if (iowait) {
- error = xfs_buf_iowait(bp);
- if (error)
- xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
- xfs_buf_relse(bp);
- }
-
+ error = xfs_buf_iowait(bp);
+ if (error)
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ xfs_buf_relse(bp);
return error;
}
@@ -1614,7 +1609,8 @@ xfs_mapping_buftarg(
STATIC int
xfs_alloc_delwrite_queue(
- xfs_buftarg_t *btp)
+ xfs_buftarg_t *btp,
+ const char *fsname)
{
int error = 0;
@@ -1622,7 +1618,7 @@ xfs_alloc_delwrite_queue(
INIT_LIST_HEAD(&btp->bt_delwrite_queue);
spin_lock_init(&btp->bt_delwrite_lock);
btp->bt_flags = 0;
- btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+ btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
if (IS_ERR(btp->bt_task)) {
error = PTR_ERR(btp->bt_task);
goto out_error;
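
kthread_run() accepts a printf-style name, which is all this hunk exploits to make each daemon identifiable per mount. A self-contained sketch of the pattern (the demo_* names are hypothetical):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int demo_thread(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int demo_start(void *data, const char *fsname)
	{
		struct task_struct *task;

		/* shows up as e.g. "xfsbufd/sda1" in ps, one per filesystem */
		task = kthread_run(demo_thread, data, "xfsbufd/%s", fsname);
		if (IS_ERR(task))
			return PTR_ERR(task);
		return 0;
	}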
@@ -1635,7 +1631,8 @@ out_error:
xfs_buftarg_t *
xfs_alloc_buftarg(
struct block_device *bdev,
- int external)
+ int external,
+ const char *fsname)
{
xfs_buftarg_t *btp;
@@ -1647,7 +1644,7 @@ xfs_alloc_buftarg(
goto error;
if (xfs_mapping_buftarg(btp, bdev))
goto error;
- if (xfs_alloc_delwrite_queue(btp))
+ if (xfs_alloc_delwrite_queue(btp, fsname))
goto error;
xfs_alloc_bufhash(btp, external);
return btp;
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 386e736..5fbecef 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -390,7 +390,7 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
/*
* Handling of buftargs.
*/
-extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
+extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int, const char *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 42dd3bc..d8fb1b5 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -115,6 +115,8 @@ xfs_file_fsync(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
+ xfs_ioend_wait(ip);
+
/*
* We always need to make sure that the required inode state is safe on
* disk. The inode might be clean but we still might need to force the
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 7b26cc2..699b60c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -527,6 +527,10 @@ xfs_attrmulti_by_handle(
if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
return -XFS_ERROR(EFAULT);
+ /* overflow check */
+ if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
+ return -E2BIG;
+
dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 593c05b..9287135 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -420,6 +420,10 @@ xfs_compat_attrmulti_by_handle(
sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
return -XFS_ERROR(EFAULT);
+ /* overflow check */
+ if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
+ return -E2BIG;
+
dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
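
Both the native and compat ioctl paths now use the same guard: bound a user-supplied element count before it feeds a size multiplication. The idiom in isolation (demo_op and demo_alloc_ops are hypothetical):

	#include <linux/slab.h>
	#include <linux/err.h>

	struct demo_op { int opcode; };

	static struct demo_op *
	demo_alloc_ops(unsigned int count)
	{
		/* count is user-controlled: reject before count * size wraps */
		if (count >= INT_MAX / sizeof(struct demo_op))
			return ERR_PTR(-E2BIG);

		return kmalloc(count * sizeof(struct demo_op), GFP_KERNEL);
	}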
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index e65a793..9c8019c 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -673,7 +673,10 @@ xfs_vn_fiemap(
bm.bmv_length = BTOBB(length);
/* We add one because in getbmap world count includes the header */
- bm.bmv_count = fieinfo->fi_extents_max + 1;
+ bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM :
+ fieinfo->fi_extents_max + 1;
+ bm.bmv_count = min_t(__s32, bm.bmv_count,
+ (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
bm.bmv_iflags = BMV_IF_PREALLOC;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
bm.bmv_iflags |= BMV_IF_ATTRFORK;
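
The min_t() line bounds how much extent data one FIEMAP call can request from getbmap. Assuming 4 KiB pages and the 56-byte struct getbmapx of this tree (both assumptions, not verified here), the cap evaluates to:

	/* PAGE_SIZE * 16 / sizeof(struct getbmapx)
	 *   = 65536 / 56
	 *   = 1170 extent records per call, i.e. 16 pages of results
	 */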
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 1947514..e31bf21 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -97,7 +97,7 @@ xfs_fs_set_xstate(
}
STATIC int
-xfs_fs_get_xquota(
+xfs_fs_get_dqblk(
struct super_block *sb,
int type,
qid_t id,
@@ -114,7 +114,7 @@ xfs_fs_get_xquota(
}
STATIC int
-xfs_fs_set_xquota(
+xfs_fs_set_dqblk(
struct super_block *sb,
int type,
qid_t id,
@@ -135,6 +135,6 @@ xfs_fs_set_xquota(
const struct quotactl_ops xfs_quotactl_operations = {
.get_xstate = xfs_fs_get_xstate,
.set_xstate = xfs_fs_set_xstate,
- .get_xquota = xfs_fs_get_xquota,
- .set_xquota = xfs_fs_set_xquota,
+ .get_dqblk = xfs_fs_get_dqblk,
+ .set_dqblk = xfs_fs_set_dqblk,
};
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 2b177c7..f24dbe5 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -790,18 +790,18 @@ xfs_open_devices(
* Setup xfs_mount buffer target pointers
*/
error = ENOMEM;
- mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
+ mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
if (rtdev) {
- mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
+ mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
if (logdev && logdev != ddev) {
- mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1);
+ mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
@@ -903,7 +903,8 @@ xfsaild_start(
struct xfs_ail *ailp)
{
ailp->xa_target = 0;
- ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild");
+ ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+ ailp->xa_mount->m_fsname);
if (IS_ERR(ailp->xa_task))
return -PTR_ERR(ailp->xa_task);
return 0;
@@ -1093,6 +1094,7 @@ xfs_fs_write_inode(
* the code will only flush the inode if it isn't already
* being flushed.
*/
+ xfs_ioend_wait(ip);
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (ip->i_update_core) {
error = xfs_log_inode(ip);
@@ -1210,6 +1212,7 @@ xfs_fs_put_super(
xfs_unmountfs(mp);
xfs_freesb(mp);
+ xfs_inode_shrinker_unregister(mp);
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
xfs_dmops_put(mp);
@@ -1623,6 +1626,8 @@ xfs_fs_fill_super(
if (error)
goto fail_vnrele;
+ xfs_inode_shrinker_register(mp);
+
kfree(mtpt);
return 0;
@@ -1868,6 +1873,7 @@ init_xfs_fs(void)
goto out_cleanup_procfs;
vfs_initquota();
+ xfs_inode_shrinker_init();
error = register_filesystem(&xfs_fs_type);
if (error)
@@ -1895,6 +1901,7 @@ exit_xfs_fs(void)
{
vfs_exitquota();
unregister_filesystem(&xfs_fs_type);
+ xfs_inode_shrinker_destroy();
xfs_sysctl_unregister();
xfs_cleanup_procfs();
xfs_buf_terminate();
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index fd96982..3884e20 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -95,7 +95,8 @@ xfs_inode_ag_walk(
struct xfs_perag *pag, int flags),
int flags,
int tag,
- int exclusive)
+ int exclusive,
+ int *nr_to_scan)
{
uint32_t first_index;
int last_error = 0;
@@ -134,7 +135,7 @@ restart:
if (error == EFSCORRUPTED)
break;
- } while (1);
+ } while ((*nr_to_scan)--);
if (skipped) {
delay(1);
@@ -150,12 +151,15 @@ xfs_inode_ag_iterator(
struct xfs_perag *pag, int flags),
int flags,
int tag,
- int exclusive)
+ int exclusive,
+ int *nr_to_scan)
{
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
+ int nr;
+ nr = nr_to_scan ? *nr_to_scan : INT_MAX;
for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
struct xfs_perag *pag;
@@ -165,14 +169,18 @@ xfs_inode_ag_iterator(
continue;
}
error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
- exclusive);
+ exclusive, &nr);
xfs_perag_put(pag);
if (error) {
last_error = error;
if (error == EFSCORRUPTED)
break;
}
+ if (nr <= 0)
+ break;
}
+ if (nr_to_scan)
+ *nr_to_scan = nr;
return XFS_ERROR(last_error);
}
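
The nr_to_scan plumbing converts the walker into a budgeted iterator: the per-AG walk decrements the caller's budget, and the AG loop above stops once it hits zero. The same shape in a generic, hypothetical form:

	#include <linux/list.h>

	struct demo_item { struct list_head list; };

	static int
	demo_walk_with_budget(struct list_head *head,
			      int (*execute)(struct demo_item *),
			      int *nr_to_scan)
	{
		struct demo_item *it;
		int last_error = 0;

		list_for_each_entry(it, head, list) {
			int error = execute(it);
			if (error)
				last_error = error;
			if (--(*nr_to_scan) <= 0)	/* budget exhausted */
				break;
		}
		return last_error;
	}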
@@ -291,7 +299,7 @@ xfs_sync_data(
ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
- XFS_ICI_NO_TAG, 0);
+ XFS_ICI_NO_TAG, 0, NULL);
if (error)
return XFS_ERROR(error);
@@ -310,7 +318,7 @@ xfs_sync_attr(
ASSERT((flags & ~SYNC_WAIT) == 0);
return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
- XFS_ICI_NO_TAG, 0);
+ XFS_ICI_NO_TAG, 0, NULL);
}
STATIC int
@@ -348,68 +356,23 @@ xfs_commit_dummy_trans(
STATIC int
xfs_sync_fsdata(
- struct xfs_mount *mp,
- int flags)
+ struct xfs_mount *mp)
{
struct xfs_buf *bp;
- struct xfs_buf_log_item *bip;
- int error = 0;
-
- /*
- * If this is xfssyncd() then only sync the superblock if we can
- * lock it without sleeping and it is not pinned.
- */
- if (flags & SYNC_TRYLOCK) {
- ASSERT(!(flags & SYNC_WAIT));
-
- bp = xfs_getsb(mp, XBF_TRYLOCK);
- if (!bp)
- goto out;
-
- bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
- if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
- goto out_brelse;
- } else {
- bp = xfs_getsb(mp, 0);
-
- /*
- * If the buffer is pinned then push on the log so we won't
- * get stuck waiting in the write for someone, maybe
- * ourselves, to flush the log.
- *
- * Even though we just pushed the log above, we did not have
- * the superblock buffer locked at that point so it can
- * become pinned in between there and here.
- */
- if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(mp, 0);
- }
-
-
- if (flags & SYNC_WAIT)
- XFS_BUF_UNASYNC(bp);
- else
- XFS_BUF_ASYNC(bp);
-
- error = xfs_bwrite(mp, bp);
- if (error)
- return error;
/*
- * If this is a data integrity sync make sure all pending buffers
- * are flushed out for the log coverage check below.
+ * If the buffer is pinned then push on the log so we won't get stuck
+ * waiting in the write for someone, maybe ourselves, to flush the log.
+ *
+ * Even though we just pushed the log above, we did not have the
+ * superblock buffer locked at that point so it can become pinned in
+ * between there and here.
*/
- if (flags & SYNC_WAIT)
- xfs_flush_buftarg(mp->m_ddev_targp, 1);
-
- if (xfs_log_need_covered(mp))
- error = xfs_commit_dummy_trans(mp, flags);
- return error;
+ bp = xfs_getsb(mp, 0);
+ if (XFS_BUF_ISPINNED(bp))
+ xfs_log_force(mp, 0);
- out_brelse:
- xfs_buf_relse(bp);
- out:
- return error;
+ return xfs_bwrite(mp, bp);
}
/*
@@ -433,7 +396,7 @@ int
xfs_quiesce_data(
struct xfs_mount *mp)
{
- int error;
+ int error, error2 = 0;
/* push non-blocking */
xfs_sync_data(mp, 0);
@@ -444,13 +407,20 @@ xfs_quiesce_data(
xfs_qm_sync(mp, SYNC_WAIT);
/* write superblock and hoover up shutdown errors */
- error = xfs_sync_fsdata(mp, SYNC_WAIT);
+ error = xfs_sync_fsdata(mp);
+
+ /* make sure all delwri buffers are written out */
+ xfs_flush_buftarg(mp->m_ddev_targp, 1);
+
+ /* mark the log as covered if needed */
+ if (xfs_log_need_covered(mp))
+ error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT);
/* flush data-only devices */
if (mp->m_rtdev_targp)
XFS_bflush(mp->m_rtdev_targp);
- return error;
+ return error ? error : error2;
}
STATIC void
@@ -573,9 +543,9 @@ xfs_flush_inodes(
}
/*
- * Every sync period we need to unpin all items, reclaim inodes, sync
- * quota and write out the superblock. We might need to cover the log
- * to indicate it is idle.
+ * Every sync period we need to unpin all items, reclaim inodes and sync
+ * disk quotas. We might need to cover the log to indicate that the
+ * filesystem is idle.
*/
STATIC void
xfs_sync_worker(
@@ -589,7 +559,8 @@ xfs_sync_worker(
xfs_reclaim_inodes(mp, 0);
/* dgc: errors ignored here */
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
- error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
+ if (xfs_log_need_covered(mp))
+ error = xfs_commit_dummy_trans(mp, 0);
}
mp->m_sync_seq++;
wake_up(&mp->m_wait_single_sync_task);
@@ -652,7 +623,7 @@ xfs_syncd_init(
mp->m_sync_work.w_syncer = xfs_sync_worker;
mp->m_sync_work.w_mount = mp;
mp->m_sync_work.w_completion = NULL;
- mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
+ mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
if (IS_ERR(mp->m_sync_task))
return -PTR_ERR(mp->m_sync_task);
return 0;
@@ -673,6 +644,7 @@ __xfs_inode_set_reclaim_tag(
radix_tree_tag_set(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
+ pag->pag_ici_reclaimable++;
}
/*
@@ -705,6 +677,7 @@ __xfs_inode_clear_reclaim_tag(
{
radix_tree_tag_clear(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
+ pag->pag_ici_reclaimable--;
}
/*
@@ -854,5 +827,93 @@ xfs_reclaim_inodes(
int mode)
{
return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
- XFS_ICI_RECLAIM_TAG, 1);
+ XFS_ICI_RECLAIM_TAG, 1, NULL);
+}
+
+/*
+ * Shrinker infrastructure.
+ *
+ * This is all far more complex than it needs to be. It adds a global list of
+ * mounts because the shrinkers are only given a global context. We need to make
+ * the shrinkers pass a context to avoid the need for global state.
+ */
+static LIST_HEAD(xfs_mount_list);
+static struct rw_semaphore xfs_mount_list_lock;
+
+static int
+xfs_reclaim_inode_shrink(
+ int nr_to_scan,
+ gfp_t gfp_mask)
+{
+ struct xfs_mount *mp;
+ struct xfs_perag *pag;
+ xfs_agnumber_t ag;
+ int reclaimable = 0;
+
+ if (nr_to_scan) {
+ if (!(gfp_mask & __GFP_FS))
+ return -1;
+
+ down_read(&xfs_mount_list_lock);
+ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
+ xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+ XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
+ if (nr_to_scan <= 0)
+ break;
+ }
+ up_read(&xfs_mount_list_lock);
+ }
+
+ down_read(&xfs_mount_list_lock);
+ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
+ for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+
+ pag = xfs_perag_get(mp, ag);
+ if (!pag->pag_ici_init) {
+ xfs_perag_put(pag);
+ continue;
+ }
+ reclaimable += pag->pag_ici_reclaimable;
+ xfs_perag_put(pag);
+ }
+ }
+ up_read(&xfs_mount_list_lock);
+ return reclaimable;
+}
+
+static struct shrinker xfs_inode_shrinker = {
+ .shrink = xfs_reclaim_inode_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
+void __init
+xfs_inode_shrinker_init(void)
+{
+ init_rwsem(&xfs_mount_list_lock);
+ register_shrinker(&xfs_inode_shrinker);
+}
+
+void
+xfs_inode_shrinker_destroy(void)
+{
+ ASSERT(list_empty(&xfs_mount_list));
+ unregister_shrinker(&xfs_inode_shrinker);
+}
+
+void
+xfs_inode_shrinker_register(
+ struct xfs_mount *mp)
+{
+ down_write(&xfs_mount_list_lock);
+ list_add_tail(&mp->m_mplist, &xfs_mount_list);
+ up_write(&xfs_mount_list_lock);
+}
+
+void
+xfs_inode_shrinker_unregister(
+ struct xfs_mount *mp)
+{
+ down_write(&xfs_mount_list_lock);
+ list_del(&mp->m_mplist);
+ up_write(&xfs_mount_list_lock);
}
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index d480c34..cdcbaac 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -53,6 +53,11 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
- int flags, int tag, int write_lock);
+ int flags, int tag, int write_lock, int *nr_to_scan);
+
+void xfs_inode_shrinker_init(void);
+void xfs_inode_shrinker_destroy(void);
+void xfs_inode_shrinker_register(struct xfs_mount *mp);
+void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
#endif
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
index 5a10760..207fa77 100644
--- a/fs/xfs/linux-2.6/xfs_trace.c
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -41,7 +41,6 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
-#include "xfs_attr_sf.h"
#include "xfs_attr_leaf.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
@@ -50,6 +49,8 @@
#include "xfs_aops.h"
#include "quota/xfs_dquot_item.h"
#include "quota/xfs_dquot.h"
+#include "xfs_log_recover.h"
+#include "xfs_inode_item.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index fcaa62f..8a319cf 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -32,6 +32,10 @@ struct xfs_da_node_entry;
struct xfs_dquot;
struct xlog_ticket;
struct log;
+struct xlog_recover;
+struct xlog_recover_item;
+struct xfs_buf_log_format;
+struct xfs_inode_log_format;
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -562,18 +566,21 @@ DECLARE_EVENT_CLASS(xfs_inode_class,
__field(dev_t, dev)
__field(xfs_ino_t, ino)
__field(int, count)
+ __field(int, pincount)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = VFS_I(ip)->i_sb->s_dev;
__entry->ino = ip->i_ino;
__entry->count = atomic_read(&VFS_I(ip)->i_count);
+ __entry->pincount = atomic_read(&ip->i_pincount);
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx count %d caller %pf",
+ TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->count,
+ __entry->pincount,
(char *)__entry->caller_ip)
)
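
For reference, the extended format would render roughly like the line below (values hypothetical; the tracer adds its own task/timestamp prefix):

	xfs_irele: dev 253:0 ino 0x1033 count 1 pincount 0 caller xfs_inactive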
@@ -583,6 +590,10 @@ DEFINE_EVENT(xfs_inode_class, name, \
TP_ARGS(ip, caller_ip))
DEFINE_INODE_EVENT(xfs_ihold);
DEFINE_INODE_EVENT(xfs_irele);
+DEFINE_INODE_EVENT(xfs_inode_pin);
+DEFINE_INODE_EVENT(xfs_inode_unpin);
+DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);
+
/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
DEFINE_INODE_EVENT(xfs_inode);
#define xfs_itrace_entry(ip) \
@@ -642,8 +653,6 @@ DEFINE_EVENT(xfs_dquot_class, name, \
TP_PROTO(struct xfs_dquot *dqp), \
TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
-DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
-DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
@@ -658,7 +667,6 @@ DEFINE_DQUOT_EVENT(xfs_dqread_fail);
DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
DEFINE_DQUOT_EVENT(xfs_dqget_hit);
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
@@ -1495,6 +1503,140 @@ DEFINE_EVENT(xfs_swap_extent_class, name, \
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
+DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
+ TP_PROTO(struct log *log, struct xlog_recover *trans,
+ struct xlog_recover_item *item, int pass),
+ TP_ARGS(log, trans, item, pass),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long, item)
+ __field(xlog_tid_t, tid)
+ __field(int, type)
+ __field(int, pass)
+ __field(int, count)
+ __field(int, total)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->item = (unsigned long)item;
+ __entry->tid = trans->r_log_tid;
+ __entry->type = ITEM_TYPE(item);
+ __entry->pass = pass;
+ __entry->count = item->ri_cnt;
+ __entry->total = item->ri_total;
+ ),
+ TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
+ "item region count/total %d/%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tid,
+ __entry->pass,
+ (void *)__entry->item,
+ __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
+ __entry->count,
+ __entry->total)
+)
+
+#define DEFINE_LOG_RECOVER_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_item_class, name, \
+ TP_PROTO(struct log *log, struct xlog_recover *trans, \
+ struct xlog_recover_item *item, int pass), \
+ TP_ARGS(log, trans, item, pass))
+
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
+
+DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
+ TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
+ TP_ARGS(log, buf_f),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(__int64_t, blkno)
+ __field(unsigned short, len)
+ __field(unsigned short, flags)
+ __field(unsigned short, size)
+ __field(unsigned int, map_size)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->blkno = buf_f->blf_blkno;
+ __entry->len = buf_f->blf_len;
+ __entry->flags = buf_f->blf_flags;
+ __entry->size = buf_f->blf_size;
+ __entry->map_size = buf_f->blf_map_size;
+ ),
+ TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
+ "map_size %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->blkno,
+ __entry->len,
+ __entry->flags,
+ __entry->size,
+ __entry->map_size)
+)
+
+#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
+ TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
+ TP_ARGS(log, buf_f))
+
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
+
+DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
+ TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
+ TP_ARGS(log, in_f),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(unsigned short, size)
+ __field(int, fields)
+ __field(unsigned short, asize)
+ __field(unsigned short, dsize)
+ __field(__int64_t, blkno)
+ __field(int, len)
+ __field(int, boffset)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->ino = in_f->ilf_ino;
+ __entry->size = in_f->ilf_size;
+ __entry->fields = in_f->ilf_fields;
+ __entry->asize = in_f->ilf_asize;
+ __entry->dsize = in_f->ilf_dsize;
+ __entry->blkno = in_f->ilf_blkno;
+ __entry->len = in_f->ilf_len;
+ __entry->boffset = in_f->ilf_boffset;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
+ "dsize %d, blkno 0x%llx, len %d, boffset %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->fields,
+ __entry->asize,
+ __entry->dsize,
+ __entry->blkno,
+ __entry->len,
+ __entry->boffset)
+)
+#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
+ TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
+ TP_ARGS(log, in_f))
+
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 5f79dd7..b89ec5d 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -101,7 +101,7 @@ xfs_qm_dqinit(
* No need to re-initialize these if this is a reclaimed dquot.
*/
if (brandnewdquot) {
- dqp->dq_flnext = dqp->dq_flprev = dqp;
+ INIT_LIST_HEAD(&dqp->q_freelist);
mutex_init(&dqp->q_qlock);
init_waitqueue_head(&dqp->q_pinwait);
@@ -119,20 +119,20 @@ xfs_qm_dqinit(
* Only the q_core portion was zeroed in dqreclaim_one().
* So, we need to reset others.
*/
- dqp->q_nrefs = 0;
- dqp->q_blkno = 0;
- dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
- dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
- dqp->q_bufoffset = 0;
- dqp->q_fileoffset = 0;
- dqp->q_transp = NULL;
- dqp->q_gdquot = NULL;
- dqp->q_res_bcount = 0;
- dqp->q_res_icount = 0;
- dqp->q_res_rtbcount = 0;
- atomic_set(&dqp->q_pincount, 0);
- dqp->q_hash = NULL;
- ASSERT(dqp->dq_flnext == dqp->dq_flprev);
+ dqp->q_nrefs = 0;
+ dqp->q_blkno = 0;
+ INIT_LIST_HEAD(&dqp->q_mplist);
+ INIT_LIST_HEAD(&dqp->q_hashlist);
+ dqp->q_bufoffset = 0;
+ dqp->q_fileoffset = 0;
+ dqp->q_transp = NULL;
+ dqp->q_gdquot = NULL;
+ dqp->q_res_bcount = 0;
+ dqp->q_res_icount = 0;
+ dqp->q_res_rtbcount = 0;
+ atomic_set(&dqp->q_pincount, 0);
+ dqp->q_hash = NULL;
+ ASSERT(list_empty(&dqp->q_freelist));
trace_xfs_dqreuse(dqp);
}
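
Most of this quota patch is a mechanical conversion from the hand-rolled dq_flnext/dq_flprev ring to <linux/list.h>. The few idioms the conversion leans on, collected in one hedged sketch (demo_* names hypothetical):

	#include <linux/list.h>

	struct demo_dquot {
		struct list_head q_freelist;	/* replaces flnext/flprev */
	};

	static LIST_HEAD(demo_freelist);	/* replaces the circular head */

	static void demo(struct demo_dquot *dqp)
	{
		INIT_LIST_HEAD(&dqp->q_freelist);  /* "on no list" state */

		/* old XQM_FREELIST_INSERT */
		list_add_tail(&dqp->q_freelist, &demo_freelist);

		if (!list_empty(&dqp->q_freelist))	/* old IS_ON_FREELIST */
			list_del_init(&dqp->q_freelist); /* unlink and reinit */
	}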
@@ -158,7 +158,7 @@ void
xfs_qm_dqdestroy(
xfs_dquot_t *dqp)
{
- ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));
+ ASSERT(list_empty(&dqp->q_freelist));
mutex_destroy(&dqp->q_qlock);
sv_destroy(&dqp->q_pinwait);
@@ -252,7 +252,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_bcount) >=
be64_to_cpu(d->d_blk_hardlimit)))) {
d->d_btimer = cpu_to_be32(get_seconds() +
- XFS_QI_BTIMELIMIT(mp));
+ mp->m_quotainfo->qi_btimelimit);
} else {
d->d_bwarns = 0;
}
@@ -275,7 +275,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_icount) >=
be64_to_cpu(d->d_ino_hardlimit)))) {
d->d_itimer = cpu_to_be32(get_seconds() +
- XFS_QI_ITIMELIMIT(mp));
+ mp->m_quotainfo->qi_itimelimit);
} else {
d->d_iwarns = 0;
}
@@ -298,7 +298,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_rtbcount) >=
be64_to_cpu(d->d_rtb_hardlimit)))) {
d->d_rtbtimer = cpu_to_be32(get_seconds() +
- XFS_QI_RTBTIMELIMIT(mp));
+ mp->m_quotainfo->qi_rtbtimelimit);
} else {
d->d_rtbwarns = 0;
}
@@ -325,6 +325,7 @@ xfs_qm_init_dquot_blk(
uint type,
xfs_buf_t *bp)
{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
xfs_dqblk_t *d;
int curid, i;
@@ -337,16 +338,16 @@ xfs_qm_init_dquot_blk(
/*
* ID of the first dquot in the block - id's are zero based.
*/
- curid = id - (id % XFS_QM_DQPERBLK(mp));
+ curid = id - (id % q->qi_dqperchunk);
ASSERT(curid >= 0);
- memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)));
- for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++)
+ memset(d, 0, BBTOB(q->qi_dqchunklen));
+ for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
xfs_qm_dqinit_core(curid, type, d);
xfs_trans_dquot_buf(tp, bp,
(type & XFS_DQ_USER ? XFS_BLI_UDQUOT_BUF :
((type & XFS_DQ_PROJ) ? XFS_BLI_PDQUOT_BUF :
XFS_BLI_GDQUOT_BUF)));
- xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1);
+ xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
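
The qi_dqperchunk arithmetic here (and in xfs_qm_dqtobp below) is plain modular indexing. Worked numbers, purely illustrative (16 dquots per chunk, id 37):

	uint dqperchunk = 16, id = 37;

	uint curid = id - (id % dqperchunk);	/* 32: first id in the chunk */
	uint slot  = id % dqperchunk;		/* 5: this dquot's slot */
	/* byte offset within the buffer: slot * sizeof(xfs_dqblk_t) */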
@@ -419,7 +420,7 @@ xfs_qm_dqalloc(
/* now we can just get the buffer (there's nothing to read yet) */
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(mp),
+ mp->m_quotainfo->qi_dqchunklen,
0);
if (!bp || (error = XFS_BUF_GETERROR(bp)))
goto error1;
@@ -500,7 +501,8 @@ xfs_qm_dqtobp(
*/
if (dqp->q_blkno == (xfs_daddr_t) 0) {
/* We use the id as an index */
- dqp->q_fileoffset = (xfs_fileoff_t)id / XFS_QM_DQPERBLK(mp);
+ dqp->q_fileoffset = (xfs_fileoff_t)id /
+ mp->m_quotainfo->qi_dqperchunk;
nmaps = 1;
quotip = XFS_DQ_TO_QIP(dqp);
xfs_ilock(quotip, XFS_ILOCK_SHARED);
@@ -529,7 +531,7 @@ xfs_qm_dqtobp(
/*
* offset of dquot in the (fixed sized) dquot chunk.
*/
- dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) *
+ dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
sizeof(xfs_dqblk_t);
if (map.br_startblock == HOLESTARTBLOCK) {
/*
@@ -559,15 +561,13 @@ xfs_qm_dqtobp(
* Read in the buffer, unless we've just done the allocation
* (in which case we already have the buf).
*/
- if (! newdquot) {
+ if (!newdquot) {
trace_xfs_dqtobp_read(dqp);
- if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(mp),
- 0, &bp))) {
- return (error);
- }
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+ dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen,
+ 0, &bp);
if (error || !bp)
return XFS_ERROR(error);
}
@@ -689,14 +689,14 @@ xfs_qm_idtodq(
tp = NULL;
if (flags & XFS_QMOPT_DQALLOC) {
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
- if ((error = xfs_trans_reserve(tp,
- XFS_QM_DQALLOC_SPACE_RES(mp),
- XFS_WRITE_LOG_RES(mp) +
- BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 +
- 128,
- 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_WRITE_LOG_COUNT))) {
+ error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
+ XFS_WRITE_LOG_RES(mp) +
+ BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 +
+ 128,
+ 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_WRITE_LOG_COUNT);
+ if (error) {
cancelflags = 0;
goto error0;
}
@@ -751,7 +751,6 @@ xfs_qm_dqlookup(
{
xfs_dquot_t *dqp;
uint flist_locked;
- xfs_dquot_t *d;
ASSERT(mutex_is_locked(&qh->qh_lock));
@@ -760,7 +759,7 @@ xfs_qm_dqlookup(
/*
* Traverse the hashchain looking for a match
*/
- for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
+ list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
/*
* We already have the hashlock. We don't need the
* dqlock to look at the id field of the dquot, since the
@@ -772,12 +771,12 @@ xfs_qm_dqlookup(
/*
* All in core dquots must be on the dqlist of mp
*/
- ASSERT(dqp->MPL_PREVP != NULL);
+ ASSERT(!list_empty(&dqp->q_mplist));
xfs_dqlock(dqp);
if (dqp->q_nrefs == 0) {
- ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
- if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+ ASSERT(!list_empty(&dqp->q_freelist));
+ if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
trace_xfs_dqlookup_want(dqp);
/*
@@ -787,7 +786,7 @@ xfs_qm_dqlookup(
*/
dqp->dq_flags |= XFS_DQ_WANT;
xfs_dqunlock(dqp);
- xfs_qm_freelist_lock(xfs_Gqm);
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
xfs_dqlock(dqp);
dqp->dq_flags &= ~(XFS_DQ_WANT);
}
@@ -802,46 +801,28 @@ xfs_qm_dqlookup(
if (flist_locked) {
if (dqp->q_nrefs != 0) {
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
flist_locked = B_FALSE;
} else {
- /*
- * take it off the freelist
- */
+ /* take it off the freelist */
trace_xfs_dqlookup_freelist(dqp);
- XQM_FREELIST_REMOVE(dqp);
- /* xfs_qm_freelist_print(&(xfs_Gqm->
- qm_dqfreelist),
- "after removal"); */
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
}
}
- /*
- * grab a reference
- */
XFS_DQHOLD(dqp);
if (flist_locked)
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
/*
* move the dquot to the front of the hashchain
*/
ASSERT(mutex_is_locked(&qh->qh_lock));
- if (dqp->HL_PREVP != &qh->qh_next) {
- trace_xfs_dqlookup_move(dqp);
- if ((d = dqp->HL_NEXT))
- d->HL_PREVP = dqp->HL_PREVP;
- *(dqp->HL_PREVP) = d;
- d = qh->qh_next;
- d->HL_PREVP = &dqp->HL_NEXT;
- dqp->HL_NEXT = d;
- dqp->HL_PREVP = &qh->qh_next;
- qh->qh_next = dqp;
- }
+ list_move(&dqp->q_hashlist, &qh->qh_list);
trace_xfs_dqlookup_done(dqp);
*O_dqpp = dqp;
- ASSERT(mutex_is_locked(&qh->qh_lock));
- return (0);
+ return 0;
}
}
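
With list_heads in place, the removed move-to-front block becomes a single list_move(), preserving the old behaviour of keeping hot dquots at the head of the hash chain. The caching idea as a sketch against this patch's structures (demo_lookup is hypothetical; locking elided):

	static struct xfs_dquot *
	demo_lookup(struct xfs_dqhash *qh, xfs_dqid_t id)
	{
		struct xfs_dquot *dqp;

		list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
			if (be32_to_cpu(dqp->q_core.d_id) != id)
				continue;
			/* hot entries migrate to the head of the chain */
			list_move(&dqp->q_hashlist, &qh->qh_list);
			return dqp;
		}
		return NULL;
	}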
@@ -975,16 +956,17 @@ xfs_qm_dqget(
*/
if (ip) {
xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (! XFS_IS_DQTYPE_ON(mp, type)) {
- /* inode stays locked on return */
- xfs_qm_dqdestroy(dqp);
- return XFS_ERROR(ESRCH);
- }
+
/*
* A dquot could be attached to this inode by now, since
* we had dropped the ilock.
*/
if (type == XFS_DQ_USER) {
+ if (!XFS_IS_UQUOTA_ON(mp)) {
+ /* inode stays locked on return */
+ xfs_qm_dqdestroy(dqp);
+ return XFS_ERROR(ESRCH);
+ }
if (ip->i_udquot) {
xfs_qm_dqdestroy(dqp);
dqp = ip->i_udquot;
@@ -992,6 +974,11 @@ xfs_qm_dqget(
goto dqret;
}
} else {
+ if (!XFS_IS_OQUOTA_ON(mp)) {
+ /* inode stays locked on return */
+ xfs_qm_dqdestroy(dqp);
+ return XFS_ERROR(ESRCH);
+ }
if (ip->i_gdquot) {
xfs_qm_dqdestroy(dqp);
dqp = ip->i_gdquot;
@@ -1033,13 +1020,14 @@ xfs_qm_dqget(
*/
ASSERT(mutex_is_locked(&h->qh_lock));
dqp->q_hash = h;
- XQM_HASHLIST_INSERT(h, dqp);
+ list_add(&dqp->q_hashlist, &h->qh_list);
+ h->qh_version++;
/*
* Attach this dquot to this filesystem's list of all dquots,
* kept inside the mount structure in m_quotainfo field
*/
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
/*
* We return a locked dquot to the caller, with a reference taken
@@ -1047,9 +1035,9 @@ xfs_qm_dqget(
xfs_dqlock(dqp);
dqp->q_nrefs = 1;
- XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);
-
- xfs_qm_mplist_unlock(mp);
+ list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
+ mp->m_quotainfo->qi_dquots++;
+ mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
mutex_unlock(&h->qh_lock);
dqret:
ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -1086,10 +1074,10 @@ xfs_qm_dqput(
* drop the dqlock and acquire the freelist and dqlock
* in the right order; but try to get it out-of-order first
*/
- if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+ if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
trace_xfs_dqput_wait(dqp);
xfs_dqunlock(dqp);
- xfs_qm_freelist_lock(xfs_Gqm);
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
xfs_dqlock(dqp);
}
@@ -1100,10 +1088,8 @@ xfs_qm_dqput(
if (--dqp->q_nrefs == 0) {
trace_xfs_dqput_free(dqp);
- /*
- * insert at end of the freelist.
- */
- XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);
+ list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
+ xfs_Gqm->qm_dqfrlist_cnt++;
/*
* If we just added a udquot to the freelist, then
@@ -1118,10 +1104,6 @@ xfs_qm_dqput(
xfs_dqlock(gdqp);
dqp->q_gdquot = NULL;
}
-
- /* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
- "@@@@@++ Free list (after append) @@@@@+");
- */
}
xfs_dqunlock(dqp);
@@ -1133,7 +1115,7 @@ xfs_qm_dqput(
break;
dqp = gdqp;
}
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
}
/*
@@ -1386,10 +1368,10 @@ int
xfs_qm_dqpurge(
xfs_dquot_t *dqp)
{
- xfs_dqhash_t *thishash;
+ xfs_dqhash_t *qh = dqp->q_hash;
xfs_mount_t *mp = dqp->q_mount;
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+ ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
xfs_dqlock(dqp);
@@ -1407,7 +1389,7 @@ xfs_qm_dqpurge(
return (1);
}
- ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+ ASSERT(!list_empty(&dqp->q_freelist));
/*
* If we're turning off quotas, we have to make sure that, for
@@ -1452,14 +1434,16 @@ xfs_qm_dqpurge(
ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
!(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
- thishash = dqp->q_hash;
- XQM_HASHLIST_REMOVE(thishash, dqp);
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
+ list_del_init(&dqp->q_hashlist);
+ qh->qh_version++;
+ list_del_init(&dqp->q_mplist);
+ mp->m_quotainfo->qi_dqreclaims++;
+ mp->m_quotainfo->qi_dquots--;
/*
* XXX Move this to the front of the freelist, if we can get the
* freelist lock.
*/
- ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+ ASSERT(!list_empty(&dqp->q_freelist));
dqp->q_mount = NULL;
dqp->q_hash = NULL;
@@ -1467,7 +1451,7 @@ xfs_qm_dqpurge(
memset(&dqp->q_core, 0, sizeof(dqp->q_core));
xfs_dqfunlock(dqp);
xfs_dqunlock(dqp);
- mutex_unlock(&thishash->qh_lock);
+ mutex_unlock(&qh->qh_lock);
return (0);
}
@@ -1517,6 +1501,7 @@ void
xfs_qm_dqflock_pushbuf_wait(
xfs_dquot_t *dqp)
{
+ xfs_mount_t *mp = dqp->q_mount;
xfs_buf_t *bp;
/*
@@ -1525,14 +1510,14 @@ xfs_qm_dqflock_pushbuf_wait(
* out immediately. We'll be able to acquire
* the flush lock when the I/O completes.
*/
- bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
+ bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
if (!bp)
goto out_lock;
if (XFS_BUF_ISDELAYWRITE(bp)) {
if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(dqp->q_mount, 0);
+ xfs_log_force(mp, 0);
xfs_buf_delwri_promote(bp);
wake_up_process(bp->b_target->bt_task);
}
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index a0f7da5..5da3a23 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -33,40 +33,23 @@
* The hash chain headers (hash buckets)
*/
typedef struct xfs_dqhash {
- struct xfs_dquot *qh_next;
+ struct list_head qh_list;
struct mutex qh_lock;
uint qh_version; /* ever increasing version */
uint qh_nelems; /* number of dquots on the list */
} xfs_dqhash_t;
-typedef struct xfs_dqlink {
- struct xfs_dquot *ql_next; /* forward link */
- struct xfs_dquot **ql_prevp; /* pointer to prev ql_next */
-} xfs_dqlink_t;
-
struct xfs_mount;
struct xfs_trans;
/*
- * This is the marker which is designed to occupy the first few
- * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers
- * must come first.
- * This serves as the marker ("sentinel") when we have to restart list
- * iterations because of locking considerations.
- */
-typedef struct xfs_dqmarker {
- struct xfs_dquot*dqm_flnext; /* link to freelist: must be first */
- struct xfs_dquot*dqm_flprev;
- xfs_dqlink_t dqm_mplist; /* link to mount's list of dquots */
- xfs_dqlink_t dqm_hashlist; /* link to the hash chain */
- uint dqm_flags; /* various flags (XFS_DQ_*) */
-} xfs_dqmarker_t;
-
-/*
* The incore dquot structure
*/
typedef struct xfs_dquot {
- xfs_dqmarker_t q_lists; /* list ptrs, q_flags (marker) */
+ uint dq_flags; /* various flags (XFS_DQ_*) */
+ struct list_head q_freelist; /* global free list of dquots */
+ struct list_head q_mplist; /* mount's list of dquots */
+	struct list_head q_hashlist;	/* global hash list of dquots */
xfs_dqhash_t *q_hash; /* the hashchain header */
struct xfs_mount*q_mount; /* filesystem this relates to */
struct xfs_trans*q_transp; /* trans this belongs to currently */
@@ -87,13 +70,6 @@ typedef struct xfs_dquot {
wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
} xfs_dquot_t;
-
-#define dq_flnext q_lists.dqm_flnext
-#define dq_flprev q_lists.dqm_flprev
-#define dq_mplist q_lists.dqm_mplist
-#define dq_hashlist q_lists.dqm_hashlist
-#define dq_flags q_lists.dqm_flags
-
/*
* Lock hierarchy for q_qlock:
* XFS_QLOCK_NORMAL is the implicit default,
@@ -127,7 +103,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
}
#define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock)))
-#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 4e4ee9a..8d89a24 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -107,8 +107,7 @@ xfs_qm_dquot_logitem_pin(
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
- xfs_dq_logitem_t *logitem,
- int stale)
+ xfs_dq_logitem_t *logitem)
{
xfs_dquot_t *dqp = logitem->qli_dquot;
@@ -123,7 +122,7 @@ xfs_qm_dquot_logitem_unpin_remove(
xfs_dq_logitem_t *logitem,
xfs_trans_t *tp)
{
- xfs_qm_dquot_logitem_unpin(logitem, 0);
+ xfs_qm_dquot_logitem_unpin(logitem);
}
/*
@@ -228,7 +227,7 @@ xfs_qm_dquot_logitem_pushbuf(
}
mp = dqp->q_mount;
bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
- XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
+ mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
return;
@@ -329,8 +328,7 @@ static struct xfs_item_ops xfs_dquot_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_dquot_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))
- xfs_qm_dquot_logitem_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_qm_dquot_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))
@@ -357,9 +355,8 @@ xfs_qm_dquot_logitem_init(
xfs_dq_logitem_t *lp;
lp = &dqp->q_logitem;
- lp->qli_item.li_type = XFS_LI_DQUOT;
- lp->qli_item.li_ops = &xfs_dquot_item_ops;
- lp->qli_item.li_mountp = dqp->q_mount;
+ xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
+ &xfs_dquot_item_ops);
lp->qli_dquot = dqp;
lp->qli_format.qlf_type = XFS_LI_DQUOT;
lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
@@ -426,7 +423,7 @@ xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
*/
/*ARGSUSED*/
STATIC void
-xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
+xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf)
{
return;
}
@@ -537,8 +534,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t* ,int))
- xfs_qm_qoff_logitem_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
xfs_qm_qoff_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
@@ -559,8 +555,7 @@ static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))
- xfs_qm_qoff_logitem_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
xfs_qm_qoff_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
@@ -586,11 +581,8 @@ xfs_qm_qoff_logitem_init(
qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);
- qf->qql_item.li_type = XFS_LI_QUOTAOFF;
- if (start)
- qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
- else
- qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
+ xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
+ &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
qf->qql_item.li_mountp = mp;
qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
qf->qql_format.qf_flags = flags;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 417e61e..38e7641 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -67,9 +67,6 @@ static cred_t xfs_zerocr;
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
-STATIC void xfs_qm_freelist_init(xfs_frlist_t *);
-STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *);
-
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int xfs_qm_shake(int, gfp_t);
@@ -84,21 +81,25 @@ extern struct mutex qcheck_lock;
#endif
#ifdef QUOTADEBUG
-#define XQM_LIST_PRINT(l, NXT, title) \
-{ \
- xfs_dquot_t *dqp; int i = 0; \
- cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
- for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
- cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \
- "bcnt = %d, icnt = %d, refs = %d", \
- ++i, (int) be32_to_cpu(dqp->q_core.d_id), \
- DQFLAGTO_TYPESTR(dqp), \
- (int) be64_to_cpu(dqp->q_core.d_bcount), \
- (int) be64_to_cpu(dqp->q_core.d_icount), \
- (int) dqp->q_nrefs); } \
+static void
+xfs_qm_dquot_list_print(
+ struct xfs_mount *mp)
+{
+ xfs_dquot_t *dqp;
+ int i = 0;
+
+	list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist, q_mplist) {
+ cmn_err(CE_DEBUG, " %d. \"%d (%s)\" "
+ "bcnt = %lld, icnt = %lld, refs = %d",
+ i++, be32_to_cpu(dqp->q_core.d_id),
+ DQFLAGTO_TYPESTR(dqp),
+ (long long)be64_to_cpu(dqp->q_core.d_bcount),
+ (long long)be64_to_cpu(dqp->q_core.d_icount),
+ dqp->q_nrefs);
+ }
}
#else
-#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
+static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
#endif
/*
@@ -144,7 +145,9 @@ xfs_Gqm_init(void)
/*
* Freelist of all dquots of all file systems
*/
- xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
+ INIT_LIST_HEAD(&xqm->qm_dqfrlist);
+ xqm->qm_dqfrlist_cnt = 0;
+ mutex_init(&xqm->qm_dqfrlist_lock);
/*
* dquot zone. we register our own low-memory callback.
@@ -189,6 +192,7 @@ STATIC void
xfs_qm_destroy(
struct xfs_qm *xqm)
{
+ struct xfs_dquot *dqp, *n;
int hsize, i;
ASSERT(xqm != NULL);
@@ -204,7 +208,21 @@ xfs_qm_destroy(
xqm->qm_usr_dqhtable = NULL;
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;
- xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
+
+ /* frlist cleanup */
+ mutex_lock(&xqm->qm_dqfrlist_lock);
+ list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
+ xfs_dqlock(dqp);
+#ifdef QUOTADEBUG
+ cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
+#endif
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ xfs_dqunlock(dqp);
+ xfs_qm_dqdestroy(dqp);
+ }
+ mutex_unlock(&xqm->qm_dqfrlist_lock);
+ mutex_destroy(&xqm->qm_dqfrlist_lock);
#ifdef DEBUG
mutex_destroy(&qcheck_lock);
#endif
@@ -256,7 +274,7 @@ STATIC void
xfs_qm_rele_quotafs_ref(
struct xfs_mount *mp)
{
- xfs_dquot_t *dqp, *nextdqp;
+ xfs_dquot_t *dqp, *n;
ASSERT(xfs_Gqm);
ASSERT(xfs_Gqm->qm_nrefs > 0);
@@ -264,26 +282,24 @@ xfs_qm_rele_quotafs_ref(
/*
* Go thru the freelist and destroy all inactive dquots.
*/
- xfs_qm_freelist_lock(xfs_Gqm);
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
- dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
+ list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
xfs_dqlock(dqp);
- nextdqp = dqp->dq_flnext;
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
ASSERT(dqp->q_mount == NULL);
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
- XQM_FREELIST_REMOVE(dqp);
+ ASSERT(list_empty(&dqp->q_hashlist));
+ ASSERT(list_empty(&dqp->q_mplist));
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
xfs_dqunlock(dqp);
xfs_qm_dqdestroy(dqp);
} else {
xfs_dqunlock(dqp);
}
- dqp = nextdqp;
}
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
/*
* Destroy the entire XQM. If somebody mounts with quotaon, this'll
@@ -305,7 +321,7 @@ xfs_qm_unmount(
struct xfs_mount *mp)
{
if (mp->m_quotainfo) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
xfs_qm_destroy_quotainfo(mp);
}
}
@@ -449,20 +465,21 @@ xfs_qm_unmount_quotas(
*/
STATIC int
xfs_qm_dqflush_all(
- xfs_mount_t *mp,
- int sync_mode)
+ struct xfs_mount *mp,
+ int sync_mode)
{
- int recl;
- xfs_dquot_t *dqp;
- int niters;
- int error;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ int recl;
+ struct xfs_dquot *dqp;
+ int niters;
+ int error;
- if (mp->m_quotainfo == NULL)
+ if (!q)
return 0;
niters = 0;
again:
- xfs_qm_mplist_lock(mp);
- FOREACH_DQUOT_IN_MP(dqp, mp) {
+ mutex_lock(&q->qi_dqlist_lock);
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
if (! XFS_DQ_IS_DIRTY(dqp)) {
xfs_dqunlock(dqp);
@@ -470,7 +487,7 @@ again:
}
/* XXX a sentinel would be better */
- recl = XFS_QI_MPLRECLAIMS(mp);
+ recl = q->qi_dqreclaims;
if (!xfs_dqflock_nowait(dqp)) {
/*
* If we can't grab the flush lock then check
@@ -485,21 +502,21 @@ again:
* Let go of the mplist lock. We don't want to hold it
* across a disk write.
*/
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
error = xfs_qm_dqflush(dqp, sync_mode);
xfs_dqunlock(dqp);
if (error)
return error;
- xfs_qm_mplist_lock(mp);
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
- xfs_qm_mplist_unlock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
+ if (recl != q->qi_dqreclaims) {
+ mutex_unlock(&q->qi_dqlist_lock);
/* XXX restart limit */
goto again;
}
}
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
/* return ! busy */
return 0;
}
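
xfs_qm_sync, like xfs_qm_dqflush_all above, uses qi_dqreclaims as a cheap generation counter: sample it under the list lock, drop the lock for the flush, retake it, and restart the walk if the counter moved. The pattern in a self-contained, hypothetical form:

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct demo_item { struct list_head list; };

	struct demo_info {
		struct mutex		lock;
		int			reclaim_gen;	/* bumped on removal */
		struct list_head	items;
	};

	static void demo_flush_one(struct demo_item *it) { /* slow I/O */ }

	static void demo_flush_all(struct demo_info *q)
	{
		struct demo_item *it;
		int gen;
	again:
		mutex_lock(&q->lock);
		list_for_each_entry(it, &q->items, list) {
			gen = q->reclaim_gen;	/* sample the generation */
			mutex_unlock(&q->lock);	/* never hold it across I/O */
			demo_flush_one(it);
			mutex_lock(&q->lock);
			if (gen != q->reclaim_gen) {	/* list changed */
				mutex_unlock(&q->lock);
				goto again;	/* our cursor may be stale */
			}
		}
		mutex_unlock(&q->lock);
	}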
@@ -509,15 +526,15 @@ again:
*/
STATIC void
xfs_qm_detach_gdquots(
- xfs_mount_t *mp)
+ struct xfs_mount *mp)
{
- xfs_dquot_t *dqp, *gdqp;
- int nrecl;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ struct xfs_dquot *dqp, *gdqp;
+ int nrecl;
again:
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
- dqp = XFS_QI_MPLNEXT(mp);
- while (dqp) {
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
if ((gdqp = dqp->q_gdquot)) {
xfs_dqlock(gdqp);
@@ -530,15 +547,14 @@ xfs_qm_detach_gdquots(
* Can't hold the mplist lock across a dqput.
* XXXmust convert to marker based iterations here.
*/
- nrecl = XFS_QI_MPLRECLAIMS(mp);
- xfs_qm_mplist_unlock(mp);
+ nrecl = q->qi_dqreclaims;
+ mutex_unlock(&q->qi_dqlist_lock);
xfs_qm_dqput(gdqp);
- xfs_qm_mplist_lock(mp);
- if (nrecl != XFS_QI_MPLRECLAIMS(mp))
+ mutex_lock(&q->qi_dqlist_lock);
+ if (nrecl != q->qi_dqreclaims)
goto again;
}
- dqp = dqp->MPL_NEXT;
}
}
@@ -550,23 +566,23 @@ xfs_qm_detach_gdquots(
*/
STATIC int
xfs_qm_dqpurge_int(
- xfs_mount_t *mp,
- uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
+ struct xfs_mount *mp,
+ uint flags)
{
- xfs_dquot_t *dqp;
- uint dqtype;
- int nrecl;
- xfs_dquot_t *nextdqp;
- int nmisses;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ struct xfs_dquot *dqp, *n;
+ uint dqtype;
+ int nrecl;
+ int nmisses;
- if (mp->m_quotainfo == NULL)
+ if (!q)
return 0;
dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
/*
* In the first pass through all incore dquots of this filesystem,
@@ -578,28 +594,25 @@ xfs_qm_dqpurge_int(
again:
nmisses = 0;
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
/*
* Try to get rid of all of the unwanted dquots. The idea is to
* get them off mplist and hashlist, but leave them on freelist.
*/
- dqp = XFS_QI_MPLNEXT(mp);
- while (dqp) {
+ list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
/*
* It's OK to look at the type without taking dqlock here.
* We're holding the mplist lock here, and that's needed for
* a dqreclaim.
*/
- if ((dqp->dq_flags & dqtype) == 0) {
- dqp = dqp->MPL_NEXT;
+ if ((dqp->dq_flags & dqtype) == 0)
continue;
- }
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
- nrecl = XFS_QI_MPLRECLAIMS(mp);
- xfs_qm_mplist_unlock(mp);
+ nrecl = q->qi_dqreclaims;
+ mutex_unlock(&q->qi_dqlist_lock);
mutex_lock(&dqp->q_hash->qh_lock);
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
/*
* XXXTheoretically, we can get into a very long
@@ -607,7 +620,7 @@ xfs_qm_dqpurge_int(
* No one can be adding dquots to the mplist at
* this point, but somebody might be taking things off.
*/
- if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
+ if (nrecl != q->qi_dqreclaims) {
mutex_unlock(&dqp->q_hash->qh_lock);
goto again;
}
@@ -617,11 +630,9 @@ xfs_qm_dqpurge_int(
* Take the dquot off the mplist and hashlist. It may remain on
* freelist in INACTIVE state.
*/
- nextdqp = dqp->MPL_NEXT;
nmisses += xfs_qm_dqpurge(dqp);
- dqp = nextdqp;
}
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
return nmisses;
}
@@ -921,12 +932,13 @@ xfs_qm_dqdetach(
int
xfs_qm_sync(
- xfs_mount_t *mp,
- int flags)
+ struct xfs_mount *mp,
+ int flags)
{
- int recl, restarts;
- xfs_dquot_t *dqp;
- int error;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ int recl, restarts;
+ struct xfs_dquot *dqp;
+ int error;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
@@ -934,18 +946,19 @@ xfs_qm_sync(
restarts = 0;
again:
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
/*
 * dqpurge_all() also takes the mplist lock and iterates through all dquots
* in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
* when we have the mplist lock, we know that dquots will be consistent
* as long as we have it locked.
*/
- if (! XFS_IS_QUOTA_ON(mp)) {
- xfs_qm_mplist_unlock(mp);
+ if (!XFS_IS_QUOTA_ON(mp)) {
+ mutex_unlock(&q->qi_dqlist_lock);
return 0;
}
- FOREACH_DQUOT_IN_MP(dqp, mp) {
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
/*
* If this is vfs_sync calling, then skip the dquots that
* don't 'seem' to be dirty. ie. don't acquire dqlock.
@@ -969,7 +982,7 @@ xfs_qm_sync(
}
/* XXX a sentinel would be better */
- recl = XFS_QI_MPLRECLAIMS(mp);
+ recl = q->qi_dqreclaims;
if (!xfs_dqflock_nowait(dqp)) {
if (flags & SYNC_TRYLOCK) {
xfs_dqunlock(dqp);
@@ -989,7 +1002,7 @@ xfs_qm_sync(
* Let go of the mplist lock. We don't want to hold it
* across a disk write
*/
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
error = xfs_qm_dqflush(dqp, flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
@@ -997,17 +1010,17 @@ xfs_qm_sync(
else if (error)
return error;
- xfs_qm_mplist_lock(mp);
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
+ mutex_lock(&q->qi_dqlist_lock);
+ if (recl != q->qi_dqreclaims) {
if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
break;
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
goto again;
}
}
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
return 0;
}
@@ -1052,8 +1065,9 @@ xfs_qm_init_quotainfo(
return error;
}
- xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
- lockdep_set_class(&qinf->qi_dqlist.qh_lock, &xfs_quota_mplist_class);
+ INIT_LIST_HEAD(&qinf->qi_dqlist);
+ mutex_init(&qinf->qi_dqlist_lock);
+ lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
qinf->qi_dqreclaims = 0;
@@ -1150,7 +1164,8 @@ xfs_qm_destroy_quotainfo(
*/
xfs_qm_rele_quotafs_ref(mp);
- xfs_qm_list_destroy(&qi->qi_dqlist);
+ ASSERT(list_empty(&qi->qi_dqlist));
+ mutex_destroy(&qi->qi_dqlist_lock);
if (qi->qi_uquotaip) {
IRELE(qi->qi_uquotaip);
@@ -1177,7 +1192,7 @@ xfs_qm_list_init(
int n)
{
mutex_init(&list->qh_lock);
- list->qh_next = NULL;
+ INIT_LIST_HEAD(&list->qh_list);
list->qh_version = 0;
list->qh_nelems = 0;
}
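
This hunk is the commit's central substitution in miniature: the open-coded qh_next/qh_prev sentinel pair becomes an embedded struct list_head, and INIT_LIST_HEAD() establishes the canonical empty state in which the head points back at itself. A hedged sketch with made-up names:

	#include <linux/list.h>

	struct dq_list {
		struct list_head	head;	/* replaces qh_next/qh_prev */
		unsigned int		nelems;
	};

	static void dq_list_init(struct dq_list *l)
	{
		INIT_LIST_HEAD(&l->head);	/* an empty list points at itself */
		l->nelems = 0;
	}

	static void dq_list_add(struct dq_list *l, struct list_head *entry)
	{
		list_add(entry, &l->head);	/* push at the front, like the old insert */
		l->nelems++;
	}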
@@ -1316,9 +1331,6 @@ xfs_qm_qino_alloc(
*/
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- unsigned oldv = mp->m_sb.sb_versionnum;
-#endif
ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
@@ -1331,11 +1343,6 @@ xfs_qm_qino_alloc(
/* qflags will get updated _after_ quotacheck */
mp->m_sb.sb_qflags = 0;
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- cmn_err(CE_NOTE,
- "Old superblock version %x, converting to %x.",
- oldv, mp->m_sb.sb_versionnum);
-#endif
}
if (flags & XFS_QMOPT_UQUOTA)
mp->m_sb.sb_uquotino = (*ip)->i_ino;
@@ -1371,10 +1378,10 @@ xfs_qm_reset_dqcounts(
#ifdef DEBUG
j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
do_div(j, sizeof(xfs_dqblk_t));
- ASSERT(XFS_QM_DQPERBLK(mp) == j);
+ ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
- for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
+ for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to
@@ -1429,7 +1436,7 @@ xfs_qm_dqiter_bufs(
while (blkcnt--) {
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, bno),
- (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp);
if (error)
break;
@@ -1439,7 +1446,7 @@ xfs_qm_dqiter_bufs(
* goto the next block.
*/
bno++;
- firstid += XFS_QM_DQPERBLK(mp);
+ firstid += mp->m_quotainfo->qi_dqperchunk;
}
return error;
}
@@ -1505,7 +1512,7 @@ xfs_qm_dqiterate(
continue;
firstid = (xfs_dqid_t) map[i].br_startoff *
- XFS_QM_DQPERBLK(mp);
+ mp->m_quotainfo->qi_dqperchunk;
/*
* Do a read-ahead on the next extent.
*/
@@ -1516,7 +1523,7 @@ xfs_qm_dqiterate(
while (rablkcnt--) {
xfs_baread(mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, rablkno),
- (int)XFS_QI_DQCHUNKLEN(mp));
+ mp->m_quotainfo->qi_dqchunklen);
rablkno++;
}
}
@@ -1576,8 +1583,10 @@ xfs_qm_quotacheck_dqadjust(
/*
* Set default limits, adjust timers (since we changed usages)
+ *
+ * There are no timers for the default values set in the root dquot.
*/
- if (! XFS_IS_SUSER_DQUOT(dqp)) {
+ if (dqp->q_core.d_id) {
xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
}
@@ -1747,14 +1756,14 @@ xfs_qm_quotacheck(
lastino = 0;
flags = 0;
- ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
+ ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
/*
* There should be no cached dquots. The (simplistic) quotacheck
* algorithm doesn't like that.
*/
- ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
+ ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
@@ -1763,15 +1772,19 @@ xfs_qm_quotacheck(
* their counters to zero. We need a clean slate.
* We don't log our changes till later.
*/
- if ((uip = XFS_QI_UQIP(mp))) {
- if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
+ uip = mp->m_quotainfo->qi_uquotaip;
+ if (uip) {
+ error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
+ if (error)
goto error_return;
flags |= XFS_UQUOTA_CHKD;
}
- if ((gip = XFS_QI_GQIP(mp))) {
- if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
+ gip = mp->m_quotainfo->qi_gquotaip;
+ if (gip) {
+ error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
+ XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
+ if (error)
goto error_return;
flags |= XFS_OQUOTA_CHKD;
}
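
These quotacheck hunks also unroll the old assignment-inside-if style into a separate call and test, the form checkpatch.pl prefers and the one that keeps error paths obvious. The shape of the rewrite as a tiny self-contained sketch (helpers are hypothetical):

	/* Hypothetical helpers, for illustration only. */
	static int do_first(void)  { return 0; }
	static int do_second(void) { return 0; }

	static int setup(void)
	{
		int error;

		/* Before: if ((error = do_first())) goto out; */
		error = do_first();
		if (error)
			goto out;

		error = do_second();
		if (error)
			goto out;
	out:
		return error;
	}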
@@ -1804,7 +1817,7 @@ xfs_qm_quotacheck(
* at this point (because we intentionally didn't in dqget_noattach).
*/
if (error) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
goto error_return;
}
@@ -1825,7 +1838,7 @@ xfs_qm_quotacheck(
mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
mp->m_qflags |= flags;
- XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
+ xfs_qm_dquot_list_print(mp);
error_return:
if (error) {
@@ -1920,59 +1933,53 @@ xfs_qm_init_quotainos(
}
}
- XFS_QI_UQIP(mp) = uip;
- XFS_QI_GQIP(mp) = gip;
+ mp->m_quotainfo->qi_uquotaip = uip;
+ mp->m_quotainfo->qi_gquotaip = gip;
return 0;
}
+
/*
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
- * favor the lookup function ...
- * XXXsup merge this with qm_reclaim_one().
+ * Just pop the least recently used dquot off the freelist and
+ * recycle it. The returned dquot is locked.
*/
-STATIC int
-xfs_qm_shake_freelist(
- int howmany)
+STATIC xfs_dquot_t *
+xfs_qm_dqreclaim_one(void)
{
- int nreclaimed;
- xfs_dqhash_t *hash;
- xfs_dquot_t *dqp, *nextdqp;
+ xfs_dquot_t *dqpout;
+ xfs_dquot_t *dqp;
int restarts;
- int nflushes;
-
- if (howmany <= 0)
- return 0;
- nreclaimed = 0;
restarts = 0;
- nflushes = 0;
+ dqpout = NULL;
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
-#endif
- /* lock order is : hashchainlock, freelistlock, mplistlock */
- tryagain:
- xfs_qm_freelist_lock(xfs_Gqm);
+	/* lock order: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
+startagain:
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
- ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
- nreclaimed < howmany); ) {
+ list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
+ struct xfs_mount *mp = dqp->q_mount;
xfs_dqlock(dqp);
/*
* We are racing with dqlookup here. Naturally we don't
- * want to reclaim a dquot that lookup wants.
+ * want to reclaim a dquot that lookup wants. We release the
+ * freelist lock and start over, so that lookup will grab
+ * both the dquot and the freelistlock.
*/
if (dqp->dq_flags & XFS_DQ_WANT) {
+ ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
+
+ trace_xfs_dqreclaim_want(dqp);
+
xfs_dqunlock(dqp);
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return nreclaimed;
+ return NULL;
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
- goto tryagain;
+ goto startagain;
}
/*
@@ -1981,23 +1988,27 @@ xfs_qm_shake_freelist(
* life easier.
*/
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
+ ASSERT(mp == NULL);
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
+ ASSERT(list_empty(&dqp->q_hashlist));
+ ASSERT(list_empty(&dqp->q_mplist));
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ xfs_dqunlock(dqp);
+ dqpout = dqp;
XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
- nextdqp = dqp->dq_flnext;
- goto off_freelist;
+ break;
}
- ASSERT(dqp->MPL_PREVP);
+ ASSERT(dqp->q_hash);
+ ASSERT(!list_empty(&dqp->q_mplist));
+
/*
* Try to grab the flush lock. If this dquot is in the process of
* getting flushed to disk, we don't want to reclaim it.
*/
if (!xfs_dqflock_nowait(dqp)) {
xfs_dqunlock(dqp);
- dqp = dqp->dq_flnext;
continue;
}
@@ -2010,21 +2021,21 @@ xfs_qm_shake_freelist(
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- trace_xfs_dqshake_dirty(dqp);
+ trace_xfs_dqreclaim_dirty(dqp);
/*
* We flush it delayed write, so don't bother
- * releasing the mplock.
+ * releasing the freelist lock.
*/
error = xfs_qm_dqflush(dqp, 0);
if (error) {
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqflush_all: dquot %p flush failed", dqp);
+ xfs_fs_cmn_err(CE_WARN, mp,
+ "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
}
xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
- dqp = dqp->dq_flnext;
continue;
}
+
/*
* We're trying to get the hashlock out of order. This races
	 * with dqlookup; so, we give up and go to the next dquot if
@@ -2033,56 +2044,74 @@ xfs_qm_shake_freelist(
* waiting for the freelist lock.
*/
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- dqp = dqp->dq_flnext;
- continue;
+ restarts++;
+ goto dqfunlock;
}
+
/*
* This races with dquot allocation code as well as dqflush_all
* and reclaim code. So, if we failed to grab the mplist lock,
	 * give up everything and start over.
*/
- hash = dqp->q_hash;
- ASSERT(hash);
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
- /* XXX put a sentinel so that we can come back here */
+ if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
+ restarts++;
+ mutex_unlock(&dqp->q_hash->qh_lock);
xfs_dqfunlock(dqp);
xfs_dqunlock(dqp);
- mutex_unlock(&hash->qh_lock);
- xfs_qm_freelist_unlock(xfs_Gqm);
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return nreclaimed;
- goto tryagain;
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+ if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
+ return NULL;
+ goto startagain;
}
- trace_xfs_dqshake_unlink(dqp);
-
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
- dqp, be32_to_cpu(dqp->q_core.d_id));
-#endif
ASSERT(dqp->q_nrefs == 0);
- nextdqp = dqp->dq_flnext;
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
- XQM_HASHLIST_REMOVE(hash, dqp);
+ list_del_init(&dqp->q_mplist);
+ mp->m_quotainfo->qi_dquots--;
+ mp->m_quotainfo->qi_dqreclaims++;
+ list_del_init(&dqp->q_hashlist);
+ dqp->q_hash->qh_version++;
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ dqpout = dqp;
+ mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+ mutex_unlock(&dqp->q_hash->qh_lock);
+dqfunlock:
xfs_dqfunlock(dqp);
- xfs_qm_mplist_unlock(dqp->q_mount);
- mutex_unlock(&hash->qh_lock);
-
- off_freelist:
- XQM_FREELIST_REMOVE(dqp);
xfs_dqunlock(dqp);
- nreclaimed++;
- XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
+ if (dqpout)
+ break;
+ if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+ return NULL;
+ }
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+ return dqpout;
+}
+
+/*
+ * Traverse the freelist of dquots and attempt to reclaim a maximum of
+ * 'howmany' dquots. This operation races with dqlookup(), and attempts to
+ * favor the lookup function ...
+ */
+STATIC int
+xfs_qm_shake_freelist(
+ int howmany)
+{
+ int nreclaimed = 0;
+ xfs_dquot_t *dqp;
+
+ if (howmany <= 0)
+ return 0;
+
+ while (nreclaimed < howmany) {
+ dqp = xfs_qm_dqreclaim_one();
+ if (!dqp)
+ return nreclaimed;
xfs_qm_dqdestroy(dqp);
- dqp = nextdqp;
+ nreclaimed++;
}
- xfs_qm_freelist_unlock(xfs_Gqm);
return nreclaimed;
}
-
/*
* The kmem_shake interface is invoked when memory is running low.
*/
@@ -2097,7 +2126,7 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
if (!xfs_Gqm)
return 0;
- nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
+ nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
/* incore dquots in all f/s's */
ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
@@ -2113,131 +2142,6 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
}
-/*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqreclaim_one(void)
-{
- xfs_dquot_t *dqpout;
- xfs_dquot_t *dqp;
- int restarts;
- int nflushes;
-
- restarts = 0;
- dqpout = NULL;
- nflushes = 0;
-
- /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
- startagain:
- xfs_qm_freelist_lock(xfs_Gqm);
-
- FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
- xfs_dqlock(dqp);
-
- /*
- * We are racing with dqlookup here. Naturally we don't
- * want to reclaim a dquot that lookup wants. We release the
- * freelist lock and start over, so that lookup will grab
- * both the dquot and the freelistlock.
- */
- if (dqp->dq_flags & XFS_DQ_WANT) {
- ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
-
- trace_xfs_dqreclaim_want(dqp);
-
- xfs_dqunlock(dqp);
- xfs_qm_freelist_unlock(xfs_Gqm);
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return NULL;
- XQM_STATS_INC(xqmstats.xs_qm_dqwants);
- goto startagain;
- }
-
- /*
- * If the dquot is inactive, we are assured that it is
- * not on the mplist or the hashlist, and that makes our
- * life easier.
- */
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- dqpout = dqp;
- XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
- break;
- }
-
- ASSERT(dqp->q_hash);
- ASSERT(dqp->MPL_PREVP);
-
- /*
- * Try to grab the flush lock. If this dquot is in the process of
- * getting flushed to disk, we don't want to reclaim it.
- */
- if (!xfs_dqflock_nowait(dqp)) {
- xfs_dqunlock(dqp);
- continue;
- }
-
- /*
- * We have the flush lock so we know that this is not in the
- * process of being flushed. So, if this is dirty, flush it
- * DELWRI so that we don't get a freelist infested with
- * dirty dquots.
- */
- if (XFS_DQ_IS_DIRTY(dqp)) {
- int error;
-
- trace_xfs_dqreclaim_dirty(dqp);
-
- /*
- * We flush it delayed write, so don't bother
- * releasing the freelist lock.
- */
- error = xfs_qm_dqflush(dqp, 0);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
- }
- xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
- continue;
- }
-
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- continue;
- }
-
- if (!mutex_trylock(&dqp->q_hash->qh_lock))
- goto mplistunlock;
-
- trace_xfs_dqreclaim_unlink(dqp);
-
- ASSERT(dqp->q_nrefs == 0);
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
- XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
- XQM_FREELIST_REMOVE(dqp);
- dqpout = dqp;
- mutex_unlock(&dqp->q_hash->qh_lock);
- mplistunlock:
- xfs_qm_mplist_unlock(dqp->q_mount);
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- if (dqpout)
- break;
- }
-
- xfs_qm_freelist_unlock(xfs_Gqm);
- return dqpout;
-}
-
-
/*------------------------------------------------------------------*/
/*
@@ -2662,66 +2566,3 @@ xfs_qm_vop_create_dqattach(
}
}
-/* ------------- list stuff -----------------*/
-STATIC void
-xfs_qm_freelist_init(xfs_frlist_t *ql)
-{
- ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
- mutex_init(&ql->qh_lock);
- ql->qh_version = 0;
- ql->qh_nelems = 0;
-}
-
-STATIC void
-xfs_qm_freelist_destroy(xfs_frlist_t *ql)
-{
- xfs_dquot_t *dqp, *nextdqp;
-
- mutex_lock(&ql->qh_lock);
- for (dqp = ql->qh_next;
- dqp != (xfs_dquot_t *)ql; ) {
- xfs_dqlock(dqp);
- nextdqp = dqp->dq_flnext;
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
-#endif
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- xfs_qm_dqdestroy(dqp);
- dqp = nextdqp;
- }
- mutex_unlock(&ql->qh_lock);
- mutex_destroy(&ql->qh_lock);
-
- ASSERT(ql->qh_nelems == 0);
-}
-
-STATIC void
-xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
- dq->dq_flnext = ql->qh_next;
- dq->dq_flprev = (xfs_dquot_t *)ql;
- ql->qh_next = dq;
- dq->dq_flnext->dq_flprev = dq;
- xfs_Gqm->qm_dqfreelist.qh_nelems++;
- xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_unlink(xfs_dquot_t *dq)
-{
- xfs_dquot_t *next = dq->dq_flnext;
- xfs_dquot_t *prev = dq->dq_flprev;
-
- next->dq_flprev = prev;
- prev->dq_flnext = next;
- dq->dq_flnext = dq->dq_flprev = dq;
- xfs_Gqm->qm_dqfreelist.qh_nelems--;
- xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
- xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
-}
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index 495564b..c9446f1 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -72,17 +72,6 @@ extern kmem_zone_t *qm_dqtrxzone;
#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3
typedef xfs_dqhash_t xfs_dqlist_t;
-/*
- * The freelist head. The first two fields match the first two in the
- * xfs_dquot_t structure (in xfs_dqmarker_t)
- */
-typedef struct xfs_frlist {
- struct xfs_dquot *qh_next;
- struct xfs_dquot *qh_prev;
- struct mutex qh_lock;
- uint qh_version;
- uint qh_nelems;
-} xfs_frlist_t;
/*
* Quota Manager (global) structure. Lives only in core.
@@ -91,7 +80,9 @@ typedef struct xfs_qm {
xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */
xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */
uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */
- xfs_frlist_t qm_dqfreelist; /* freelist of dquots */
+ struct list_head qm_dqfrlist; /* freelist of dquots */
+ struct mutex qm_dqfrlist_lock;
+ int qm_dqfrlist_cnt;
atomic_t qm_totaldquots; /* total incore dquots */
uint qm_nrefs; /* file systems with quota on */
int qm_dqfree_ratio;/* ratio of free to inuse dquots */
@@ -106,7 +97,9 @@ typedef struct xfs_qm {
typedef struct xfs_quotainfo {
xfs_inode_t *qi_uquotaip; /* user quota inode */
xfs_inode_t *qi_gquotaip; /* group quota inode */
- xfs_dqlist_t qi_dqlist; /* all dquots in filesys */
+ struct list_head qi_dqlist; /* all dquots in filesys */
+ struct mutex qi_dqlist_lock;
+ int qi_dquots;
int qi_dqreclaims; /* a change here indicates
a removal in the dqlist */
time_t qi_btimelimit; /* limit for blks timer */
@@ -175,10 +168,6 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
-/* list stuff */
-extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
-extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
-
#ifdef DEBUG
extern int xfs_qm_internalqcheck(xfs_mount_t *);
#else
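
One cost of the list_head conversion shows in the structures above: struct list_head keeps no element count, so qm_dqfrlist is paired with an explicit qm_dqfrlist_cnt (and qi_dqlist with qi_dquots) that every add and remove site must maintain. Sketch of the pairing, hypothetical names:

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct counted_list {
		struct list_head	head;
		struct mutex		lock;
		int			cnt;	/* no O(1) length in list_head, so count by hand */
	};

	static void counted_list_del(struct counted_list *cl, struct list_head *entry)
	{
		mutex_lock(&cl->lock);
		list_del_init(entry);
		cl->cnt--;
		mutex_unlock(&cl->lock);
	}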
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
index 83e7ea3..3d1fc79 100644
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ b/fs/xfs/quota/xfs_qm_stats.c
@@ -55,7 +55,7 @@ static int xqm_proc_show(struct seq_file *m, void *v)
ndquot,
xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
- xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);
+ xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0);
return 0;
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 5d0ee8d..92b002f 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -79,6 +79,7 @@ xfs_qm_scall_quotaoff(
xfs_mount_t *mp,
uint flags)
{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
uint dqtype;
int error;
uint inactivate_flags;
@@ -102,11 +103,8 @@ xfs_qm_scall_quotaoff(
* critical thing.
* If quotaoff, then we must be dealing with the root filesystem.
*/
- ASSERT(mp->m_quotainfo);
- if (mp->m_quotainfo)
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
-
- ASSERT(mp->m_quotainfo);
+ ASSERT(q);
+ mutex_lock(&q->qi_quotaofflock);
/*
* If we're just turning off quota enforcement, change mp and go.
@@ -117,7 +115,7 @@ xfs_qm_scall_quotaoff(
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = mp->m_qflags;
spin_unlock(&mp->m_sb_lock);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_unlock(&q->qi_quotaofflock);
/* XXX what to do if error ? Revert back to old vals incore ? */
error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
@@ -150,10 +148,8 @@ xfs_qm_scall_quotaoff(
* Nothing to do? Don't complain. This happens when we're just
* turning off quota enforcement.
*/
- if ((mp->m_qflags & flags) == 0) {
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
- return (0);
- }
+ if ((mp->m_qflags & flags) == 0)
+ goto out_unlock;
/*
* Write the LI_QUOTAOFF log record, and do SB changes atomically,
@@ -162,7 +158,7 @@ xfs_qm_scall_quotaoff(
*/
error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
if (error)
- goto out_error;
+ goto out_unlock;
/*
* Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
@@ -204,7 +200,7 @@ xfs_qm_scall_quotaoff(
* So, if we couldn't purge all the dquots from the filesystem,
* we can't get rid of the incore data structures.
*/
- while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF)))
+ while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
delay(10 * nculprits);
/*
@@ -222,7 +218,7 @@ xfs_qm_scall_quotaoff(
if (error) {
/* We're screwed now. Shutdown is the only option. */
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- goto out_error;
+ goto out_unlock;
}
/*
@@ -230,27 +226,26 @@ xfs_qm_scall_quotaoff(
*/
if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_unlock(&q->qi_quotaofflock);
xfs_qm_destroy_quotainfo(mp);
return (0);
}
/*
- * Release our quotainode references, and vn_purge them,
- * if we don't need them anymore.
+ * Release our quotainode references if we don't need them anymore.
*/
- if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) {
- IRELE(XFS_QI_UQIP(mp));
- XFS_QI_UQIP(mp) = NULL;
+ if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
+ IRELE(q->qi_uquotaip);
+ q->qi_uquotaip = NULL;
}
- if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && XFS_QI_GQIP(mp)) {
- IRELE(XFS_QI_GQIP(mp));
- XFS_QI_GQIP(mp) = NULL;
+ if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
+ IRELE(q->qi_gquotaip);
+ q->qi_gquotaip = NULL;
}
-out_error:
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
- return (error);
+out_unlock:
+ mutex_unlock(&q->qi_quotaofflock);
+ return error;
}
int
@@ -379,9 +374,9 @@ xfs_qm_scall_quotaon(
/*
* Switch on quota enforcement in core.
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
return (0);
}
@@ -392,11 +387,12 @@ xfs_qm_scall_quotaon(
*/
int
xfs_qm_scall_getqstat(
- xfs_mount_t *mp,
- fs_quota_stat_t *out)
+ struct xfs_mount *mp,
+ struct fs_quota_stat *out)
{
- xfs_inode_t *uip, *gip;
- boolean_t tempuqip, tempgqip;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ struct xfs_inode *uip, *gip;
+ boolean_t tempuqip, tempgqip;
uip = gip = NULL;
tempuqip = tempgqip = B_FALSE;
@@ -415,9 +411,9 @@ xfs_qm_scall_getqstat(
out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
- if (mp->m_quotainfo) {
- uip = mp->m_quotainfo->qi_uquotaip;
- gip = mp->m_quotainfo->qi_gquotaip;
+ if (q) {
+ uip = q->qi_uquotaip;
+ gip = q->qi_gquotaip;
}
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
@@ -441,17 +437,20 @@ xfs_qm_scall_getqstat(
if (tempgqip)
IRELE(gip);
}
- if (mp->m_quotainfo) {
- out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp);
- out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp);
- out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp);
- out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp);
- out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp);
- out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp);
+ if (q) {
+ out->qs_incoredqs = q->qi_dquots;
+ out->qs_btimelimit = q->qi_btimelimit;
+ out->qs_itimelimit = q->qi_itimelimit;
+ out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+ out->qs_bwarnlimit = q->qi_bwarnlimit;
+ out->qs_iwarnlimit = q->qi_iwarnlimit;
}
- return (0);
+ return 0;
}
+#define XFS_DQ_MASK \
+ (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+
/*
* Adjust quota limits, and start/stop timers accordingly.
*/
@@ -462,15 +461,17 @@ xfs_qm_scall_setqlim(
uint type,
fs_disk_quota_t *newlim)
{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
xfs_disk_dquot_t *ddq;
xfs_dquot_t *dqp;
xfs_trans_t *tp;
int error;
xfs_qcnt_t hard, soft;
- if ((newlim->d_fieldmask &
- (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
- return (0);
+ if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+ return EINVAL;
+ if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+ return 0;
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
@@ -485,7 +486,7 @@ xfs_qm_scall_setqlim(
* a quotaoff from happening). (XXXThis doesn't currently happen
* because we take the vfslock before calling xfs_qm_sysent).
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_lock(&q->qi_quotaofflock);
/*
* Get the dquot (locked), and join it to the transaction.
@@ -493,9 +494,8 @@ xfs_qm_scall_setqlim(
*/
if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
xfs_trans_cancel(tp, XFS_TRANS_ABORT);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
ASSERT(error != ENOENT);
- return (error);
+ goto out_unlock;
}
xfs_trans_dqjoin(tp, dqp);
ddq = &dqp->q_core;
@@ -513,8 +513,8 @@ xfs_qm_scall_setqlim(
ddq->d_blk_hardlimit = cpu_to_be64(hard);
ddq->d_blk_softlimit = cpu_to_be64(soft);
if (id == 0) {
- mp->m_quotainfo->qi_bhardlimit = hard;
- mp->m_quotainfo->qi_bsoftlimit = soft;
+ q->qi_bhardlimit = hard;
+ q->qi_bsoftlimit = soft;
}
} else {
qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
@@ -529,8 +529,8 @@ xfs_qm_scall_setqlim(
ddq->d_rtb_hardlimit = cpu_to_be64(hard);
ddq->d_rtb_softlimit = cpu_to_be64(soft);
if (id == 0) {
- mp->m_quotainfo->qi_rtbhardlimit = hard;
- mp->m_quotainfo->qi_rtbsoftlimit = soft;
+ q->qi_rtbhardlimit = hard;
+ q->qi_rtbsoftlimit = soft;
}
} else {
qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
@@ -546,8 +546,8 @@ xfs_qm_scall_setqlim(
ddq->d_ino_hardlimit = cpu_to_be64(hard);
ddq->d_ino_softlimit = cpu_to_be64(soft);
if (id == 0) {
- mp->m_quotainfo->qi_ihardlimit = hard;
- mp->m_quotainfo->qi_isoftlimit = soft;
+ q->qi_ihardlimit = hard;
+ q->qi_isoftlimit = soft;
}
} else {
qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
@@ -572,23 +572,23 @@ xfs_qm_scall_setqlim(
* for warnings.
*/
if (newlim->d_fieldmask & FS_DQ_BTIMER) {
- mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
+ q->qi_btimelimit = newlim->d_btimer;
ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
}
if (newlim->d_fieldmask & FS_DQ_ITIMER) {
- mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
+ q->qi_itimelimit = newlim->d_itimer;
ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
}
if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
- mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
+ q->qi_rtbtimelimit = newlim->d_rtbtimer;
ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
}
if (newlim->d_fieldmask & FS_DQ_BWARNS)
- mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns;
+ q->qi_bwarnlimit = newlim->d_bwarns;
if (newlim->d_fieldmask & FS_DQ_IWARNS)
- mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns;
+ q->qi_iwarnlimit = newlim->d_iwarns;
if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns;
+ q->qi_rtbwarnlimit = newlim->d_rtbwarns;
} else {
/*
* If the user is now over quota, start the timelimit.
@@ -605,8 +605,9 @@ xfs_qm_scall_setqlim(
error = xfs_trans_commit(tp, 0);
xfs_qm_dqprint(dqp);
xfs_qm_dqrele(dqp);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ out_unlock:
+ mutex_unlock(&q->qi_quotaofflock);
return error;
}
@@ -853,7 +854,8 @@ xfs_dqrele_inode(
int error;
/* skip quota inodes */
- if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) {
+ if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
+ ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
ASSERT(ip->i_udquot == NULL);
ASSERT(ip->i_gdquot == NULL);
read_unlock(&pag->pag_ici_lock);
@@ -891,7 +893,8 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0);
+ xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
+ XFS_ICI_NO_TAG, 0, NULL);
}
/*------------------------------------------------------------------------*/
@@ -930,7 +933,8 @@ struct mutex qcheck_lock;
}
typedef struct dqtest {
- xfs_dqmarker_t q_lists;
+ uint dq_flags; /* various flags (XFS_DQ_*) */
+ struct list_head q_hashlist;
xfs_dqhash_t *q_hash; /* the hashchain header */
xfs_mount_t *q_mount; /* filesystem this relates to */
xfs_dqid_t d_id; /* user id or group id */
@@ -941,14 +945,9 @@ typedef struct dqtest {
STATIC void
xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
{
- xfs_dquot_t *d;
- if (((d) = (h)->qh_next))
- (d)->HL_PREVP = &((dqp)->HL_NEXT);
- (dqp)->HL_NEXT = d;
- (dqp)->HL_PREVP = &((h)->qh_next);
- (h)->qh_next = (xfs_dquot_t *)dqp;
- (h)->qh_version++;
- (h)->qh_nelems++;
+ list_add(&dqp->q_hashlist, &h->qh_list);
+ h->qh_version++;
+ h->qh_nelems++;
}
STATIC void
xfs_qm_dqtest_print(
@@ -1060,9 +1059,7 @@ xfs_qm_internalqcheck_dqget(
xfs_dqhash_t *h;
h = DQTEST_HASH(mp, id, type);
- for (d = (xfs_dqtest_t *) h->qh_next; d != NULL;
- d = (xfs_dqtest_t *) d->HL_NEXT) {
- /* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */
+ list_for_each_entry(d, &h->qh_list, q_hashlist) {
if (d->d_id == id && mp == d->q_mount) {
*O_dq = d;
return (0);
@@ -1073,6 +1070,7 @@ xfs_qm_internalqcheck_dqget(
d->d_id = id;
d->q_mount = mp;
d->q_hash = h;
+ INIT_LIST_HEAD(&d->q_hashlist);
xfs_qm_hashinsert(h, d);
*O_dq = d;
return (0);
@@ -1179,8 +1177,6 @@ xfs_qm_internalqcheck(
xfs_ino_t lastino;
int done, count;
int i;
- xfs_dqtest_t *d, *e;
- xfs_dqhash_t *h1;
int error;
lastino = 0;
@@ -1220,19 +1216,18 @@ xfs_qm_internalqcheck(
}
cmn_err(CE_DEBUG, "Checking results against system dquots");
for (i = 0; i < qmtest_hashmask; i++) {
- h1 = &qmtest_udqtab[i];
- for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+ xfs_dqtest_t *d, *n;
+ xfs_dqhash_t *h;
+
+ h = &qmtest_udqtab[i];
+ list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
xfs_dqtest_cmp(d);
- e = (xfs_dqtest_t *) d->HL_NEXT;
kmem_free(d);
- d = e;
}
- h1 = &qmtest_gdqtab[i];
- for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+ h = &qmtest_gdqtab[i];
+ list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
xfs_dqtest_cmp(d);
- e = (xfs_dqtest_t *) d->HL_NEXT;
kmem_free(d);
- d = e;
}
}
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
index 8286b28..94a3d92 100644
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ b/fs/xfs/quota/xfs_quota_priv.h
@@ -24,43 +24,6 @@
*/
#define XFS_DQITER_MAP_SIZE 10
-/* Number of dquots that fit in to a dquot block */
-#define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk)
-
-#define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t))
-
-#define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims)
-#define XFS_QI_UQIP(mp) ((mp)->m_quotainfo->qi_uquotaip)
-#define XFS_QI_GQIP(mp) ((mp)->m_quotainfo->qi_gquotaip)
-#define XFS_QI_DQCHUNKLEN(mp) ((mp)->m_quotainfo->qi_dqchunklen)
-#define XFS_QI_BTIMELIMIT(mp) ((mp)->m_quotainfo->qi_btimelimit)
-#define XFS_QI_RTBTIMELIMIT(mp) ((mp)->m_quotainfo->qi_rtbtimelimit)
-#define XFS_QI_ITIMELIMIT(mp) ((mp)->m_quotainfo->qi_itimelimit)
-#define XFS_QI_BWARNLIMIT(mp) ((mp)->m_quotainfo->qi_bwarnlimit)
-#define XFS_QI_RTBWARNLIMIT(mp) ((mp)->m_quotainfo->qi_rtbwarnlimit)
-#define XFS_QI_IWARNLIMIT(mp) ((mp)->m_quotainfo->qi_iwarnlimit)
-#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock)
-
-#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist)
-#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next)
-#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems)
-
-#define xfs_qm_mplist_lock(mp) \
- mutex_lock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_nowait(mp) \
- mutex_trylock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_unlock(mp) \
- mutex_unlock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define XFS_QM_IS_MPLIST_LOCKED(mp) \
- mutex_is_locked(&(XFS_QI_MPL_LIST(mp).qh_lock))
-
-#define xfs_qm_freelist_lock(qm) \
- mutex_lock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_lock_nowait(qm) \
- mutex_trylock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_unlock(qm) \
- mutex_unlock(&((qm)->qm_dqfreelist.qh_lock))
-
/*
* Hash into a bucket in the dquot hash table, based on <mp, id>.
*/
@@ -72,9 +35,6 @@
XFS_DQ_HASHVAL(mp, id)) : \
(xfs_Gqm->qm_grp_dqhtable + \
XFS_DQ_HASHVAL(mp, id)))
-#define XFS_IS_DQTYPE_ON(mp, type) (type == XFS_DQ_USER ? \
- XFS_IS_UQUOTA_ON(mp) : \
- XFS_IS_OQUOTA_ON(mp))
#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
!dqp->q_core.d_blk_hardlimit && \
!dqp->q_core.d_blk_softlimit && \
@@ -86,68 +46,6 @@
!dqp->q_core.d_rtbcount && \
!dqp->q_core.d_icount)
-#define HL_PREVP dq_hashlist.ql_prevp
-#define HL_NEXT dq_hashlist.ql_next
-#define MPL_PREVP dq_mplist.ql_prevp
-#define MPL_NEXT dq_mplist.ql_next
-
-
-#define _LIST_REMOVE(h, dqp, PVP, NXT) \
- { \
- xfs_dquot_t *d; \
- if (((d) = (dqp)->NXT)) \
- (d)->PVP = (dqp)->PVP; \
- *((dqp)->PVP) = d; \
- (dqp)->NXT = NULL; \
- (dqp)->PVP = NULL; \
- (h)->qh_version++; \
- (h)->qh_nelems--; \
- }
-
-#define _LIST_INSERT(h, dqp, PVP, NXT) \
- { \
- xfs_dquot_t *d; \
- if (((d) = (h)->qh_next)) \
- (d)->PVP = &((dqp)->NXT); \
- (dqp)->NXT = d; \
- (dqp)->PVP = &((h)->qh_next); \
- (h)->qh_next = dqp; \
- (h)->qh_version++; \
- (h)->qh_nelems++; \
- }
-
-#define FOREACH_DQUOT_IN_MP(dqp, mp) \
- for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT)
-
-#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist) \
-for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
- (dqp) = (dqp)->dq_flnext)
-
-#define XQM_HASHLIST_INSERT(h, dqp) \
- _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT)
-
-#define XQM_FREELIST_INSERT(h, dqp) \
- xfs_qm_freelist_append(h, dqp)
-
-#define XQM_MPLIST_INSERT(h, dqp) \
- _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT)
-
-#define XQM_HASHLIST_REMOVE(h, dqp) \
- _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT)
-#define XQM_FREELIST_REMOVE(dqp) \
- xfs_qm_freelist_unlink(dqp)
-#define XQM_MPLIST_REMOVE(h, dqp) \
- { _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \
- XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; }
-
-#define XFS_DQ_IS_LOGITEM_INITD(dqp) ((dqp)->q_logitem.qli_dquot == (dqp))
-
-#define XFS_QM_DQP_TO_DQACCT(tp, dqp) (XFS_QM_ISUDQ(dqp) ? \
- (tp)->t_dqinfo->dqa_usrdquots : \
- (tp)->t_dqinfo->dqa_grpdquots)
-#define XFS_IS_SUSER_DQUOT(dqp) \
- (!((dqp)->q_core.d_id))
-
#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
(((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
(((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
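
Dropping the wrapper macros above (xfs_qm_mplist_lock() and friends) trades a little brevity for call sites that are type-checked and greppable; which lock is taken is now spelled out where it is taken. A sketch of the before and after, with stand-in names:

	#include <linux/mutex.h>

	/* Hypothetical stand-ins for the XFS structures. */
	struct qinfo { struct mutex list_lock; };
	struct mount { struct qinfo *info; };

	/*
	 * Before, roughly: #define mplist_lock(mp) mutex_lock(&(mp)->info->list_lock)
	 * After: name the lock at the call site.
	 */
	static void walk_list(struct mount *mp)
	{
		mutex_lock(&mp->info->list_lock);
		/* ... walk entries ... */
		mutex_unlock(&mp->info->list_lock);
	}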
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index c3ab75c..061d827 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -59,12 +59,11 @@ xfs_trans_dqjoin(
xfs_trans_t *tp,
xfs_dquot_t *dqp)
{
- xfs_dq_logitem_t *lp;
+ xfs_dq_logitem_t *lp = &dqp->q_logitem;
- ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+ ASSERT(dqp->q_transp != tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
- lp = &dqp->q_logitem;
+ ASSERT(lp->qli_dquot == dqp);
/*
* Get a log_item_desc to point at the new item.
@@ -96,7 +95,7 @@ xfs_trans_log_dquot(
{
xfs_log_item_desc_t *lidp;
- ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+ ASSERT(dqp->q_transp == tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
@@ -198,16 +197,16 @@ xfs_trans_get_dqtrx(
int i;
xfs_dqtrx_t *qa;
- for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
- qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);
+ qa = XFS_QM_ISUDQ(dqp) ?
+ tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;
+ for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
if (qa[i].qt_dquot == NULL ||
- qa[i].qt_dquot == dqp) {
- return (&qa[i]);
- }
+ qa[i].qt_dquot == dqp)
+ return &qa[i];
}
- return (NULL);
+ return NULL;
}
/*
@@ -381,7 +380,7 @@ xfs_trans_apply_dquot_deltas(
break;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+ ASSERT(dqp->q_transp == tp);
/*
* adjust the actual number of blocks used
@@ -639,7 +638,7 @@ xfs_trans_dqresv(
softlimit = q->qi_bsoftlimit;
timer = be32_to_cpu(dqp->q_core.d_btimer);
warns = be16_to_cpu(dqp->q_core.d_bwarns);
- warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
resbcountp = &dqp->q_res_bcount;
} else {
ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
@@ -651,7 +650,7 @@ xfs_trans_dqresv(
softlimit = q->qi_rtbsoftlimit;
timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
- warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
resbcountp = &dqp->q_res_rtbcount;
}
@@ -691,7 +690,7 @@ xfs_trans_dqresv(
count = be64_to_cpu(dqp->q_core.d_icount);
timer = be32_to_cpu(dqp->q_core.d_itimer);
warns = be16_to_cpu(dqp->q_core.d_iwarns);
- warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
if (!hardlimit)
hardlimit = q->qi_ihardlimit;
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index b1a5a1f..abb8222 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -223,6 +223,7 @@ typedef struct xfs_perag {
int pag_ici_init; /* incore inode cache initialised */
rwlock_t pag_ici_lock; /* incore inode lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
+ int pag_ici_reclaimable; /* reclaimable inodes */
#endif
int pagb_count; /* pagb slots in use */
xfs_perag_busy_t pagb_list[XFS_PAGB_NUM_SLOTS]; /* unstable blocks */
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 5c11e4d..99587de 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -3829,7 +3829,7 @@ xfs_bmap_add_attrfork(
}
if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
goto error2;
- error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
ASSERT(ip->i_df.if_ext_max ==
XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
return error;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index f3c49e6..240340a 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -372,12 +372,12 @@ xfs_buf_item_pin(
*/
STATIC void
xfs_buf_item_unpin(
- xfs_buf_log_item_t *bip,
- int stale)
+ xfs_buf_log_item_t *bip)
{
struct xfs_ail *ailp;
xfs_buf_t *bp;
int freed;
+ int stale = bip->bli_flags & XFS_BLI_STALE;
bp = bip->bli_buf;
ASSERT(bp != NULL);
@@ -428,40 +428,34 @@ xfs_buf_item_unpin_remove(
xfs_buf_log_item_t *bip,
xfs_trans_t *tp)
{
- xfs_buf_t *bp;
- xfs_log_item_desc_t *lidp;
- int stale = 0;
-
- bp = bip->bli_buf;
- /*
- * will xfs_buf_item_unpin() call xfs_buf_item_relse()?
- */
+ /* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
if ((atomic_read(&bip->bli_refcount) == 1) &&
(bip->bli_flags & XFS_BLI_STALE)) {
+ /*
+ * yes -- We can safely do some work here and then call
+		 * buf_item_unpin to do the rest because we are
+		 * holding the buffer locked so no one else will be
+ * able to bump up the refcount. We have to remove the
+ * log item from the transaction as we are about to release
+ * our reference to the buffer. If we don't, the unlock that
+ * occurs later in the xfs_trans_uncommit() will try to
+ * reference the buffer which we no longer have a hold on.
+ */
+ struct xfs_log_item_desc *lidp;
+
ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
trace_xfs_buf_item_unpin_stale(bip);
- /*
- * yes -- clear the xaction descriptor in-use flag
- * and free the chunk if required. We can safely
- * do some work here and then call buf_item_unpin
- * to do the rest because if the if is true, then
- * we are holding the buffer locked so no one else
- * will be able to bump up the refcount.
- */
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip);
- stale = lidp->lid_flags & XFS_LID_BUF_STALE;
+ lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
xfs_trans_free_item(tp, lidp);
+
/*
- * Since the transaction no longer refers to the buffer,
- * the buffer should no longer refer to the transaction.
+ * Since the transaction no longer refers to the buffer, the
+ * buffer should no longer refer to the transaction.
*/
- XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+ XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
}
-
- xfs_buf_item_unpin(bip, stale);
-
- return;
+ xfs_buf_item_unpin(bip);
}
/*
@@ -675,7 +669,7 @@ static struct xfs_item_ops xfs_buf_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_buf_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
xfs_buf_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
@@ -733,10 +727,7 @@ xfs_buf_item_init(
bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
KM_SLEEP);
- bip->bli_item.li_type = XFS_LI_BUF;
- bip->bli_item.li_ops = &xfs_buf_item_ops;
- bip->bli_item.li_mountp = mp;
- bip->bli_item.li_ailp = mp->m_ail;
+ xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
bip->bli_buf = bp;
xfs_buf_hold(bp);
bip->bli_format.blf_type = XFS_LI_BUF;
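
The unpin rework drops the caller-supplied 'stale' argument because the flag already lives in the item (bip->bli_flags & XFS_BLI_STALE); passing a copy of state the object tracks itself only lets the two drift apart. A userspace-style sketch of the same cleanup, hypothetical names:

	#include <stdio.h>

	#define FLAG_STALE	0x1

	struct item {
		unsigned int	flags;
	};

	/*
	 * Before: void unpin(struct item *it, int stale);
	 * After: derive 'stale' from the item itself.
	 */
	static void unpin(struct item *it)
	{
		int stale = (it->flags & FLAG_STALE) != 0;

		printf("unpin: stale=%d\n", stale);
	}

	int main(void)
	{
		struct item it = { .flags = FLAG_STALE };

		unpin(&it);
		return 0;
	}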
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 217f34a..df44545 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -26,7 +26,7 @@ extern kmem_zone_t *xfs_buf_item_zone;
* have been logged.
* For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
*/
-typedef struct xfs_buf_log_format_t {
+typedef struct xfs_buf_log_format {
unsigned short blf_type; /* buf log item type indicator */
unsigned short blf_size; /* size of this item */
ushort blf_flags; /* misc state */
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 92d5cd5..ef96175 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -186,18 +186,18 @@ xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
void
xfs_error_report(
- char *tag,
- int level,
- xfs_mount_t *mp,
- char *fname,
- int linenum,
- inst_t *ra)
+ const char *tag,
+ int level,
+ struct xfs_mount *mp,
+ const char *filename,
+ int linenum,
+ inst_t *ra)
{
if (level <= xfs_error_level) {
xfs_cmn_err(XFS_PTAG_ERROR_REPORT,
CE_ALERT, mp,
"XFS internal error %s at line %d of file %s. Caller 0x%p\n",
- tag, linenum, fname, ra);
+ tag, linenum, filename, ra);
xfs_stack_trace();
}
@@ -205,15 +205,15 @@ xfs_error_report(
void
xfs_corruption_error(
- char *tag,
- int level,
- xfs_mount_t *mp,
- void *p,
- char *fname,
- int linenum,
- inst_t *ra)
+ const char *tag,
+ int level,
+ struct xfs_mount *mp,
+ void *p,
+ const char *filename,
+ int linenum,
+ inst_t *ra)
{
if (level <= xfs_error_level)
xfs_hex_dump(p, 16);
- xfs_error_report(tag, level, mp, fname, linenum, ra);
+ xfs_error_report(tag, level, mp, filename, linenum, ra);
}
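
The constification lets callers hand __FILE__ and other string literals to the reporters without discarding const. A minimal userspace sketch of why the new signatures are the right shape (the function here is hypothetical, not the XFS one):

	#include <stdio.h>

	/* const-qualified string parameters: literals and __FILE__ fit cleanly. */
	static void report(const char *tag, const char *filename, int linenum)
	{
		fprintf(stderr, "error %s at %s:%d\n", tag, filename, linenum);
	}

	int main(void)
	{
		report("EXAMPLE", __FILE__, __LINE__);
		return 0;
	}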
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 0c93051..c2c1a07 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -29,10 +29,11 @@ extern int xfs_error_trap(int);
struct xfs_mount;
-extern void xfs_error_report(char *tag, int level, struct xfs_mount *mp,
- char *fname, int linenum, inst_t *ra);
-extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp,
- void *p, char *fname, int linenum, inst_t *ra);
+extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp,
+ const char *filename, int linenum, inst_t *ra);
+extern void xfs_corruption_error(const char *tag, int level,
+ struct xfs_mount *mp, void *p, const char *filename,
+ int linenum, inst_t *ra);
#define XFS_ERROR_REPORT(e, lvl, mp) \
xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 6f35ed1..409fe81 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -106,7 +106,7 @@ xfs_efi_item_pin(xfs_efi_log_item_t *efip)
*/
/*ARGSUSED*/
STATIC void
-xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
+xfs_efi_item_unpin(xfs_efi_log_item_t *efip)
{
struct xfs_ail *ailp = efip->efi_item.li_ailp;
@@ -224,7 +224,7 @@ static struct xfs_item_ops xfs_efi_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efi_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efi_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efi_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
xfs_efi_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock,
@@ -259,10 +259,7 @@ xfs_efi_init(xfs_mount_t *mp,
KM_SLEEP);
}
- efip->efi_item.li_type = XFS_LI_EFI;
- efip->efi_item.li_ops = &xfs_efi_item_ops;
- efip->efi_item.li_mountp = mp;
- efip->efi_item.li_ailp = mp->m_ail;
+ xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
efip->efi_format.efi_nextents = nextents;
efip->efi_format.efi_id = (__psint_t)(void*)efip;
@@ -428,7 +425,7 @@ xfs_efd_item_pin(xfs_efd_log_item_t *efdp)
*/
/*ARGSUSED*/
STATIC void
-xfs_efd_item_unpin(xfs_efd_log_item_t *efdp, int stale)
+xfs_efd_item_unpin(xfs_efd_log_item_t *efdp)
{
return;
}
@@ -518,7 +515,7 @@ static struct xfs_item_ops xfs_efd_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efd_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efd_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efd_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_efd_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock,
@@ -554,10 +551,7 @@ xfs_efd_init(xfs_mount_t *mp,
KM_SLEEP);
}
- efdp->efd_item.li_type = XFS_LI_EFD;
- efdp->efd_item.li_ops = &xfs_efd_item_ops;
- efdp->efd_item.li_mountp = mp;
- efdp->efd_item.li_ailp = mp->m_ail;
+ xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops);
efdp->efd_efip = efip;
efdp->efd_format.efd_nextents = nextents;
efdp->efd_format.efd_efi_id = efip->efi_format.efi_id;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 0ffd564..8cd6e8d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2449,6 +2449,8 @@ xfs_iunpin_nowait(
{
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+ trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
+
/* Give the log a push to start the unpinning I/O */
xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7bfea85..cf8249a 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -543,6 +543,7 @@ xfs_inode_item_pin(
{
ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
+ trace_xfs_inode_pin(iip->ili_inode, _RET_IP_);
atomic_inc(&iip->ili_inode->i_pincount);
}
@@ -556,11 +557,11 @@ xfs_inode_item_pin(
/* ARGSUSED */
STATIC void
xfs_inode_item_unpin(
- xfs_inode_log_item_t *iip,
- int stale)
+ xfs_inode_log_item_t *iip)
{
struct xfs_inode *ip = iip->ili_inode;
+ trace_xfs_inode_unpin(ip, _RET_IP_);
ASSERT(atomic_read(&ip->i_pincount) > 0);
if (atomic_dec_and_test(&ip->i_pincount))
wake_up(&ip->i_ipin_wait);
@@ -572,7 +573,7 @@ xfs_inode_item_unpin_remove(
xfs_inode_log_item_t *iip,
xfs_trans_t *tp)
{
- xfs_inode_item_unpin(iip, 0);
+ xfs_inode_item_unpin(iip);
}
/*
@@ -838,7 +839,7 @@ static struct xfs_item_ops xfs_inode_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_inode_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_inode_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_inode_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_inode_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock,
@@ -865,17 +866,9 @@ xfs_inode_item_init(
ASSERT(ip->i_itemp == NULL);
iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
- iip->ili_item.li_type = XFS_LI_INODE;
- iip->ili_item.li_ops = &xfs_inode_item_ops;
- iip->ili_item.li_mountp = mp;
- iip->ili_item.li_ailp = mp->m_ail;
iip->ili_inode = ip;
-
- /*
- We have zeroed memory. No need ...
- iip->ili_extents_buf = NULL;
- */
-
+ xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
+ &xfs_inode_item_ops);
iip->ili_format.ilf_type = XFS_LI_INODE;
iip->ili_format.ilf_ino = ip->i_ino;
iip->ili_format.ilf_blkno = ip->i_imap.im_blkno;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0b650399..ef14943 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -55,71 +55,33 @@
#define XFS_STRAT_WRITE_IMAPS 2
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
-STATIC int
-xfs_imap_to_bmap(
- xfs_inode_t *ip,
- xfs_off_t offset,
- xfs_bmbt_irec_t *imap,
- xfs_iomap_t *iomapp,
- int imaps, /* Number of imap entries */
- int iomaps, /* Number of iomap entries */
- int flags)
-{
- xfs_mount_t *mp = ip->i_mount;
- int pbm;
- xfs_fsblock_t start_block;
-
-
- for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) {
- iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
- iomapp->iomap_delta = offset - iomapp->iomap_offset;
- iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
- iomapp->iomap_flags = flags;
-
- if (XFS_IS_REALTIME_INODE(ip)) {
- iomapp->iomap_flags |= IOMAP_REALTIME;
- iomapp->iomap_target = mp->m_rtdev_targp;
- } else {
- iomapp->iomap_target = mp->m_ddev_targp;
- }
- start_block = imap->br_startblock;
- if (start_block == HOLESTARTBLOCK) {
- iomapp->iomap_bn = IOMAP_DADDR_NULL;
- iomapp->iomap_flags |= IOMAP_HOLE;
- } else if (start_block == DELAYSTARTBLOCK) {
- iomapp->iomap_bn = IOMAP_DADDR_NULL;
- iomapp->iomap_flags |= IOMAP_DELAY;
- } else {
- iomapp->iomap_bn = xfs_fsb_to_db(ip, start_block);
- if (ISUNWRITTEN(imap))
- iomapp->iomap_flags |= IOMAP_UNWRITTEN;
- }
-
- offset += iomapp->iomap_bsize - iomapp->iomap_delta;
- }
- return pbm; /* Return the number filled */
-}
+STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+ int, struct xfs_bmbt_irec *, int *);
+STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
+ struct xfs_bmbt_irec *, int *);
+STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+ struct xfs_bmbt_irec *, int *);
int
xfs_iomap(
- xfs_inode_t *ip,
- xfs_off_t offset,
- ssize_t count,
- int flags,
- xfs_iomap_t *iomapp,
- int *niomaps)
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ ssize_t count,
+ int flags,
+ struct xfs_bmbt_irec *imap,
+ int *nimaps,
+ int *new)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb, end_fsb;
- int error = 0;
- int lockmode = 0;
- xfs_bmbt_irec_t imap;
- int nimaps = 1;
- int bmapi_flags = 0;
- int iomap_flags = 0;
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int error = 0;
+ int lockmode = 0;
+ int bmapi_flags = 0;
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
+ *new = 0;
+
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@@ -160,8 +122,8 @@ xfs_iomap(
error = xfs_bmapi(NULL, ip, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb),
- bmapi_flags, NULL, 0, &imap,
- &nimaps, NULL, NULL);
+ bmapi_flags, NULL, 0, imap,
+ nimaps, NULL, NULL);
if (error)
goto out;
@@ -169,46 +131,41 @@ xfs_iomap(
switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
case BMAPI_WRITE:
/* If we found an extent, return it */
- if (nimaps &&
- (imap.br_startblock != HOLESTARTBLOCK) &&
- (imap.br_startblock != DELAYSTARTBLOCK)) {
- trace_xfs_iomap_found(ip, offset, count, flags, &imap);
+ if (*nimaps &&
+ (imap->br_startblock != HOLESTARTBLOCK) &&
+ (imap->br_startblock != DELAYSTARTBLOCK)) {
+ trace_xfs_iomap_found(ip, offset, count, flags, imap);
break;
}
if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
error = xfs_iomap_write_direct(ip, offset, count, flags,
- &imap, &nimaps, nimaps);
+ imap, nimaps);
} else {
error = xfs_iomap_write_delay(ip, offset, count, flags,
- &imap, &nimaps);
+ imap, nimaps);
}
if (!error) {
- trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
+ trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
}
- iomap_flags = IOMAP_NEW;
+ *new = 1;
break;
case BMAPI_ALLOCATE:
/* If we found an extent, return it */
xfs_iunlock(ip, lockmode);
lockmode = 0;
- if (nimaps && !isnullstartblock(imap.br_startblock)) {
- trace_xfs_iomap_found(ip, offset, count, flags, &imap);
+ if (*nimaps && !isnullstartblock(imap->br_startblock)) {
+ trace_xfs_iomap_found(ip, offset, count, flags, imap);
break;
}
error = xfs_iomap_write_allocate(ip, offset, count,
- &imap, &nimaps);
+ imap, nimaps);
break;
}
- if (nimaps) {
- *niomaps = xfs_imap_to_bmap(ip, offset, &imap,
- iomapp, nimaps, *niomaps, iomap_flags);
- } else if (niomaps) {
- *niomaps = 0;
- }
+ ASSERT(*nimaps <= 1);
out:
if (lockmode)
@@ -216,7 +173,6 @@ out:
return XFS_ERROR(error);
}
-
STATIC int
xfs_iomap_eof_align_last_fsb(
xfs_mount_t *mp,
@@ -285,15 +241,14 @@ xfs_cmn_err_fsblock_zero(
return EFSCORRUPTED;
}
-int
+STATIC int
xfs_iomap_write_direct(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
int flags,
xfs_bmbt_irec_t *ret_imap,
- int *nmaps,
- int found)
+ int *nmaps)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
@@ -330,7 +285,7 @@ xfs_iomap_write_direct(
if (error)
goto error_out;
} else {
- if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
+ if (*nmaps && (ret_imap->br_startblock == HOLESTARTBLOCK))
last_fsb = MIN(last_fsb, (xfs_fileoff_t)
ret_imap->br_blockcount +
ret_imap->br_startoff);
@@ -485,7 +440,7 @@ xfs_iomap_eof_want_preallocate(
return 0;
}
-int
+STATIC int
xfs_iomap_write_delay(
xfs_inode_t *ip,
xfs_off_t offset,
@@ -588,7 +543,7 @@ retry:
* We no longer bother to look at the incoming map - all we have to
* guarantee is that whatever we allocate fills the required range.
*/
-int
+STATIC int
xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_off_t offset,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 174f299..81ac4af 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,19 +18,6 @@
#ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__
-#define IOMAP_DADDR_NULL ((xfs_daddr_t) (-1LL))
-
-
-typedef enum { /* iomap_flags values */
- IOMAP_READ = 0, /* mapping for a read */
- IOMAP_HOLE = 0x02, /* mapping covers a hole */
- IOMAP_DELAY = 0x04, /* mapping covers delalloc region */
- IOMAP_REALTIME = 0x10, /* mapping on the realtime device */
- IOMAP_UNWRITTEN = 0x20, /* mapping covers allocated */
- /* but uninitialized file data */
- IOMAP_NEW = 0x40 /* just allocate */
-} iomap_flags_t;
-
typedef enum {
/* base extent manipulation calls */
BMAPI_READ = (1 << 0), /* read extents */
@@ -52,43 +39,11 @@ typedef enum {
{ BMAPI_MMAP, "MMAP" }, \
{ BMAPI_TRYLOCK, "TRYLOCK" }
-/*
- * xfs_iomap_t: File system I/O map
- *
- * The iomap_bn field is expressed in 512-byte blocks, and is where the
- * mapping starts on disk.
- *
- * The iomap_offset, iomap_bsize and iomap_delta fields are in bytes.
- * iomap_offset is the offset of the mapping in the file itself.
- * iomap_bsize is the size of the mapping, iomap_delta is the
- * desired data's offset into the mapping, given the offset supplied
- * to the file I/O map routine.
- *
- * When a request is made to read beyond the logical end of the object,
- * iomap_size may be set to 0, but iomap_offset and iomap_length should be set
- * to the actual amount of underlying storage that has been allocated, if any.
- */
-
-typedef struct xfs_iomap {
- xfs_daddr_t iomap_bn; /* first 512B blk of mapping */
- xfs_buftarg_t *iomap_target;
- xfs_off_t iomap_offset; /* offset of mapping, bytes */
- xfs_off_t iomap_bsize; /* size of mapping, bytes */
- xfs_off_t iomap_delta; /* offset into mapping, bytes */
- iomap_flags_t iomap_flags;
-} xfs_iomap_t;
-
struct xfs_inode;
struct xfs_bmbt_irec;
extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
- struct xfs_iomap *, int *);
-extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
- int, struct xfs_bmbt_irec *, int *, int);
-extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
- struct xfs_bmbt_irec *, int *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
- struct xfs_bmbt_irec *, int *);
+ struct xfs_bmbt_irec *, int *, int *);
extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2be0191..3038dd5 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -44,13 +44,8 @@
kmem_zone_t *xfs_log_ticket_zone;
-#define xlog_write_adv_cnt(ptr, len, off, bytes) \
- { (ptr) += (bytes); \
- (len) -= (bytes); \
- (off) += (bytes);}
-
/* Local miscellaneous function prototypes */
-STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
+STATIC int xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
xlog_in_core_t **, xfs_lsn_t *);
STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
xfs_buftarg_t *log_target,
@@ -59,11 +54,9 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes);
STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
STATIC void xlog_dealloc_log(xlog_t *log);
-STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
- int nentries, struct xlog_ticket *tic,
- xfs_lsn_t *start_lsn,
- xlog_in_core_t **commit_iclog,
- uint flags);
+STATIC int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
+ struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
+ xlog_in_core_t **commit_iclog, uint flags);
/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
@@ -102,7 +95,7 @@ STATIC xlog_ticket_t *xlog_ticket_alloc(xlog_t *log,
uint flags);
#if defined(DEBUG)
-STATIC void xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr);
+STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
STATIC void xlog_verify_grant_head(xlog_t *log, int equals);
STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
int count, boolean_t syncing);
@@ -258,7 +251,7 @@ xfs_log_done(
* If we get an error, just continue and give back the log ticket.
*/
(((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
- (xlog_commit_record(mp, ticket, iclog, &lsn)))) {
+ (xlog_commit_record(log, ticket, iclog, &lsn)))) {
lsn = (xfs_lsn_t) -1;
if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
flags |= XFS_LOG_REL_PERM_RESERV;
@@ -516,18 +509,10 @@ xfs_log_unmount_write(xfs_mount_t *mp)
#ifdef DEBUG
xlog_in_core_t *first_iclog;
#endif
- xfs_log_iovec_t reg[1];
xlog_ticket_t *tic = NULL;
xfs_lsn_t lsn;
int error;
- /* the data section must be 32 bit size aligned */
- struct {
- __uint16_t magic;
- __uint16_t pad1;
- __uint32_t pad2; /* may as well make it 64 bits */
- } magic = { XLOG_UNMOUNT_TYPE, 0, 0 };
-
/*
* Don't write out unmount record on read-only mounts.
* Or, if we are doing a forced umount (typically because of IO errors).
@@ -549,16 +534,30 @@ xfs_log_unmount_write(xfs_mount_t *mp)
} while (iclog != first_iclog);
#endif
if (! (XLOG_FORCED_SHUTDOWN(log))) {
- reg[0].i_addr = (void*)&magic;
- reg[0].i_len = sizeof(magic);
- reg[0].i_type = XLOG_REG_TYPE_UNMOUNT;
-
error = xfs_log_reserve(mp, 600, 1, &tic,
XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
if (!error) {
+ /* the data section must be 32 bit size aligned */
+ struct {
+ __uint16_t magic;
+ __uint16_t pad1;
+ __uint32_t pad2; /* may as well make it 64 bits */
+ } magic = {
+ .magic = XLOG_UNMOUNT_TYPE,
+ };
+ struct xfs_log_iovec reg = {
+ .i_addr = (void *)&magic,
+ .i_len = sizeof(magic),
+ .i_type = XLOG_REG_TYPE_UNMOUNT,
+ };
+ struct xfs_log_vec vec = {
+ .lv_niovecs = 1,
+ .lv_iovecp = &reg,
+ };
+
/* remove inited flag */
- ((xlog_ticket_t *)tic)->t_flags = 0;
- error = xlog_write(mp, reg, 1, tic, &lsn,
+ tic->t_flags = 0;
+ error = xlog_write(log, &vec, tic, &lsn,
NULL, XLOG_UNMOUNT_TRANS);
/*
* At this point, we're umounting anyway,
@@ -648,10 +647,26 @@ xfs_log_unmount(xfs_mount_t *mp)
xlog_dealloc_log(mp->m_log);
}
+void
+xfs_log_item_init(
+ struct xfs_mount *mp,
+ struct xfs_log_item *item,
+ int type,
+ struct xfs_item_ops *ops)
+{
+ item->li_mountp = mp;
+ item->li_ailp = mp->m_ail;
+ item->li_type = type;
+ item->li_ops = ops;
+}
+
/*
* Write region vectors to log. The write happens using the space reservation
* of the ticket (tic). It is not a requirement that all writes for a given
- * transaction occur with one call to xfs_log_write().
+ * transaction occur with one call to xfs_log_write(). However, it is important
+ * to note that the transaction reservation code makes an assumption about the
+ * number of log headers a transaction requires that may be violated if you
+ * don't pass all the transaction vectors in one call.
*/
int
xfs_log_write(
@@ -663,11 +678,15 @@ xfs_log_write(
{
struct log *log = mp->m_log;
int error;
+ struct xfs_log_vec vec = {
+ .lv_niovecs = nentries,
+ .lv_iovecp = reg,
+ };
if (XLOG_FORCED_SHUTDOWN(log))
return XFS_ERROR(EIO);
- error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0);
+ error = xlog_write(log, &vec, tic, start_lsn, NULL, 0);
if (error)
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
return error;
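The pattern in the hunk above — wrapping the caller's iovec array in an on-stack, single-node struct xfs_log_vec — is how the legacy xfs_log_write() path feeds the new chain-based xlog_write(). A standalone sketch of the idiom, with stand-in types and a fake_xlog_write() that only totals bytes rather than writing anything:

/* Sketch of the on-stack xfs_log_vec wrapper idiom. Not kernel code. */
#include <stdio.h>

struct xfs_log_iovec { void *i_addr; int i_len; unsigned i_type; };
struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;
	int			 lv_niovecs;
	struct xfs_log_iovec	*lv_iovecp;
};

static int fake_xlog_write(struct xfs_log_vec *lv)
{
	int bytes = 0;

	for (; lv; lv = lv->lv_next)
		for (int i = 0; i < lv->lv_niovecs; i++)
			bytes += lv->lv_iovecp[i].i_len;
	return bytes;
}

int main(void)
{
	char payload[64];
	struct xfs_log_iovec reg[2] = {
		{ payload,	32, 1 },
		{ payload + 32,	32, 2 },
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 2,
		.lv_iovecp = reg,
	};

	printf("would log %d bytes\n", fake_xlog_write(&vec));
	return 0;
}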
@@ -1020,6 +1039,7 @@ xlog_alloc_log(xfs_mount_t *mp,
int i;
int iclogsize;
int error = ENOMEM;
+ uint log2_size = 0;
log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
if (!log) {
@@ -1045,29 +1065,30 @@ xlog_alloc_log(xfs_mount_t *mp,
error = EFSCORRUPTED;
if (xfs_sb_version_hassector(&mp->m_sb)) {
- log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
- if (log->l_sectbb_log < 0 ||
- log->l_sectbb_log > mp->m_sectbb_log) {
- xlog_warn("XFS: Log sector size (0x%x) out of range.",
- log->l_sectbb_log);
+ log2_size = mp->m_sb.sb_logsectlog;
+ if (log2_size < BBSHIFT) {
+ xlog_warn("XFS: Log sector size too small "
+ "(0x%x < 0x%x)", log2_size, BBSHIFT);
goto out_free_log;
}
- /* for larger sector sizes, must have v2 or external log */
- if (log->l_sectbb_log != 0 &&
- (log->l_logBBstart != 0 &&
- !xfs_sb_version_haslogv2(&mp->m_sb))) {
- xlog_warn("XFS: log sector size (0x%x) invalid "
- "for configuration.", log->l_sectbb_log);
+ log2_size -= BBSHIFT;
+ if (log2_size > mp->m_sectbb_log) {
+ xlog_warn("XFS: Log sector size too large "
+ "(0x%x > 0x%x)", log2_size, mp->m_sectbb_log);
goto out_free_log;
}
- if (mp->m_sb.sb_logsectlog < BBSHIFT) {
- xlog_warn("XFS: Log sector log (0x%x) too small.",
- mp->m_sb.sb_logsectlog);
+
+ /* for larger sector sizes, must have v2 or external log */
+ if (log2_size && log->l_logBBstart > 0 &&
+ !xfs_sb_version_haslogv2(&mp->m_sb)) {
+
+ xlog_warn("XFS: log sector size (0x%x) invalid "
+ "for configuration.", log2_size);
goto out_free_log;
}
}
- log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
+ log->l_sectBBsize = 1 << log2_size;
xlog_get_iclog_buffer_size(mp, log);
@@ -1174,26 +1195,31 @@ out:
* ticket. Return the lsn of the commit record.
*/
STATIC int
-xlog_commit_record(xfs_mount_t *mp,
- xlog_ticket_t *ticket,
- xlog_in_core_t **iclog,
- xfs_lsn_t *commitlsnp)
+xlog_commit_record(
+ struct log *log,
+ struct xlog_ticket *ticket,
+ struct xlog_in_core **iclog,
+ xfs_lsn_t *commitlsnp)
{
- int error;
- xfs_log_iovec_t reg[1];
-
- reg[0].i_addr = NULL;
- reg[0].i_len = 0;
- reg[0].i_type = XLOG_REG_TYPE_COMMIT;
+ struct xfs_mount *mp = log->l_mp;
+ int error;
+ struct xfs_log_iovec reg = {
+ .i_addr = NULL,
+ .i_len = 0,
+ .i_type = XLOG_REG_TYPE_COMMIT,
+ };
+ struct xfs_log_vec vec = {
+ .lv_niovecs = 1,
+ .lv_iovecp = &reg,
+ };
ASSERT_ALWAYS(iclog);
- if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
- iclog, XLOG_COMMIT_TRANS))) {
+ error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
+ XLOG_COMMIT_TRANS);
+ if (error)
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- }
return error;
-} /* xlog_commit_record */
-
+}
/*
* Push on the buffer cache code if we ever use more than 75% of the on-disk
@@ -1614,6 +1640,192 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
}
/*
+ * Calculate the potential space needed by the log vector. Each region gets
+ * its own xlog_op_header_t and may need to be double word aligned.
+ */
+static int
+xlog_write_calc_vec_length(
+ struct xlog_ticket *ticket,
+ struct xfs_log_vec *log_vector)
+{
+ struct xfs_log_vec *lv;
+ int headers = 0;
+ int len = 0;
+ int i;
+
+ /* acct for start rec of xact */
+ if (ticket->t_flags & XLOG_TIC_INITED)
+ headers++;
+
+ for (lv = log_vector; lv; lv = lv->lv_next) {
+ headers += lv->lv_niovecs;
+
+ for (i = 0; i < lv->lv_niovecs; i++) {
+ struct xfs_log_iovec *vecp = &lv->lv_iovecp[i];
+
+ len += vecp->i_len;
+ xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
+ }
+ }
+
+ ticket->t_res_num_ophdrs += headers;
+ len += headers * sizeof(struct xlog_op_header);
+
+ return len;
+}
+
+/*
+ * If first write for transaction, insert start record. We can't be trying to
+ * commit if we are inited. We can't have any "partial_copy" if we are inited.
+ */
+static int
+xlog_write_start_rec(
+ struct xlog_op_header *ophdr,
+ struct xlog_ticket *ticket)
+{
+ if (!(ticket->t_flags & XLOG_TIC_INITED))
+ return 0;
+
+ ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
+ ophdr->oh_clientid = ticket->t_clientid;
+ ophdr->oh_len = 0;
+ ophdr->oh_flags = XLOG_START_TRANS;
+ ophdr->oh_res2 = 0;
+
+ ticket->t_flags &= ~XLOG_TIC_INITED;
+
+ return sizeof(struct xlog_op_header);
+}
+
+static xlog_op_header_t *
+xlog_write_setup_ophdr(
+ struct log *log,
+ struct xlog_op_header *ophdr,
+ struct xlog_ticket *ticket,
+ uint flags)
+{
+ ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
+ ophdr->oh_clientid = ticket->t_clientid;
+ ophdr->oh_res2 = 0;
+
+ /* are we copying a commit or unmount record? */
+ ophdr->oh_flags = flags;
+
+ /*
+ * We've seen logs corrupted with bad transaction client ids. This
+ * makes sure that XFS doesn't generate them. If one is seen, turn it
+ * into an EIO and shut down the filesystem.
+ */
+ switch (ophdr->oh_clientid) {
+ case XFS_TRANSACTION:
+ case XFS_VOLUME:
+ case XFS_LOG:
+ break;
+ default:
+ xfs_fs_cmn_err(CE_WARN, log->l_mp,
+ "Bad XFS transaction clientid 0x%x in ticket 0x%p",
+ ophdr->oh_clientid, ticket);
+ return NULL;
+ }
+
+ return ophdr;
+}
+
+/*
+ * Set up the parameters of the region copy into the log. This has
+ * to handle region write split across multiple log buffers - this
+ * state is kept external to this function so that this code can
+ * be written in an obvious, self-documenting manner.
+ */
+static int
+xlog_write_setup_copy(
+ struct xlog_ticket *ticket,
+ struct xlog_op_header *ophdr,
+ int space_available,
+ int space_required,
+ int *copy_off,
+ int *copy_len,
+ int *last_was_partial_copy,
+ int *bytes_consumed)
+{
+ int still_to_copy;
+
+ still_to_copy = space_required - *bytes_consumed;
+ *copy_off = *bytes_consumed;
+
+ if (still_to_copy <= space_available) {
+ /* write of region completes here */
+ *copy_len = still_to_copy;
+ ophdr->oh_len = cpu_to_be32(*copy_len);
+ if (*last_was_partial_copy)
+ ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
+ *last_was_partial_copy = 0;
+ *bytes_consumed = 0;
+ return 0;
+ }
+
+ /* partial write of region, needs extra log op header reservation */
+ *copy_len = space_available;
+ ophdr->oh_len = cpu_to_be32(*copy_len);
+ ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
+ if (*last_was_partial_copy)
+ ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
+ *bytes_consumed += *copy_len;
+ (*last_was_partial_copy)++;
+
+ /* account for new log op header */
+ ticket->t_curr_res -= sizeof(struct xlog_op_header);
+ ticket->t_res_num_ophdrs++;
+
+ return sizeof(struct xlog_op_header);
+}
+
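The arithmetic in xlog_write_setup_copy() above is easiest to follow with concrete numbers. The sketch below ports its core logic to userspace and runs a 3000-byte region through two iclogs with 2048 and 4096 bytes of space; flag values and OPHDR_SIZE are illustrative, and the ophdr/ticket bookkeeping is omitted.

/* Userspace walk-through of the split-region state machine. */
#include <stdio.h>

#define XLOG_CONTINUE_TRANS	0x1
#define XLOG_WAS_CONT_TRANS	0x2
#define XLOG_END_TRANS		0x4
#define OPHDR_SIZE		12	/* stand-in for sizeof(xlog_op_header) */

static int setup_copy(int space_available, int space_required,
		      int *copy_off, int *copy_len,
		      int *last_was_partial_copy, int *bytes_consumed,
		      int *flags)
{
	int still_to_copy = space_required - *bytes_consumed;

	*copy_off = *bytes_consumed;
	if (still_to_copy <= space_available) {
		*copy_len = still_to_copy;	/* region completes here */
		if (*last_was_partial_copy)
			*flags |= XLOG_END_TRANS | XLOG_WAS_CONT_TRANS;
		*last_was_partial_copy = 0;
		*bytes_consumed = 0;
		return 0;			/* no extra ophdr needed */
	}
	*copy_len = space_available;		/* partial copy */
	*flags |= XLOG_CONTINUE_TRANS;
	if (*last_was_partial_copy)
		*flags |= XLOG_WAS_CONT_TRANS;
	*bytes_consumed += *copy_len;
	(*last_was_partial_copy)++;
	return OPHDR_SIZE;			/* extra ophdr reservation */
}

int main(void)
{
	int off, len, partial = 0, consumed = 0, flags = 0, extra;

	extra = setup_copy(2048, 3000, &off, &len, &partial, &consumed, &flags);
	printf("pass 1: off=%d len=%d extra=%d flags=%#x\n", off, len, extra, flags);
	flags = 0;
	extra = setup_copy(4096, 3000, &off, &len, &partial, &consumed, &flags);
	printf("pass 2: off=%d len=%d extra=%d flags=%#x\n", off, len, extra, flags);
	return 0;
}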
+static int
+xlog_write_copy_finish(
+ struct log *log,
+ struct xlog_in_core *iclog,
+ uint flags,
+ int *record_cnt,
+ int *data_cnt,
+ int *partial_copy,
+ int *partial_copy_len,
+ int log_offset,
+ struct xlog_in_core **commit_iclog)
+{
+ if (*partial_copy) {
+ /*
+ * This iclog has already been marked WANT_SYNC by
+ * xlog_state_get_iclog_space.
+ */
+ xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
+ *record_cnt = 0;
+ *data_cnt = 0;
+ return xlog_state_release_iclog(log, iclog);
+ }
+
+ *partial_copy = 0;
+ *partial_copy_len = 0;
+
+ if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
+ /* no more space in this iclog - push it. */
+ xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
+ *record_cnt = 0;
+ *data_cnt = 0;
+
+ spin_lock(&log->l_icloglock);
+ xlog_state_want_sync(log, iclog);
+ spin_unlock(&log->l_icloglock);
+
+ if (!commit_iclog)
+ return xlog_state_release_iclog(log, iclog);
+ ASSERT(flags & XLOG_COMMIT_TRANS);
+ *commit_iclog = iclog;
+ }
+
+ return 0;
+}
+
+/*
* Write some region out to in-core log
*
* This will be called when writing externally provided regions or when
@@ -1655,209 +1867,157 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
*/
STATIC int
xlog_write(
- struct xfs_mount *mp,
- struct xfs_log_iovec reg[],
- int nentries,
+ struct log *log,
+ struct xfs_log_vec *log_vector,
struct xlog_ticket *ticket,
xfs_lsn_t *start_lsn,
struct xlog_in_core **commit_iclog,
uint flags)
{
- xlog_t *log = mp->m_log;
- xlog_in_core_t *iclog = NULL; /* ptr to current in-core log */
- xlog_op_header_t *logop_head; /* ptr to log operation header */
- __psint_t ptr; /* copy address into data region */
- int len; /* # xlog_write() bytes 2 still copy */
- int index; /* region index currently copying */
- int log_offset; /* offset (from 0) into data region */
- int start_rec_copy; /* # bytes to copy for start record */
- int partial_copy; /* did we split a region? */
- int partial_copy_len;/* # bytes copied if split region */
- int need_copy; /* # bytes need to memcpy this region */
- int copy_len; /* # bytes actually memcpy'ing */
- int copy_off; /* # bytes from entry start */
- int contwr; /* continued write of in-core log? */
- int error;
- int record_cnt = 0, data_cnt = 0;
-
- partial_copy_len = partial_copy = 0;
-
- /* Calculate potential maximum space. Each region gets its own
- * xlog_op_header_t and may need to be double word aligned.
- */
- len = 0;
- if (ticket->t_flags & XLOG_TIC_INITED) { /* acct for start rec of xact */
- len += sizeof(xlog_op_header_t);
- ticket->t_res_num_ophdrs++;
- }
+ struct xlog_in_core *iclog = NULL;
+ struct xfs_log_iovec *vecp;
+ struct xfs_log_vec *lv;
+ int len;
+ int index;
+ int partial_copy = 0;
+ int partial_copy_len = 0;
+ int contwr = 0;
+ int record_cnt = 0;
+ int data_cnt = 0;
+ int error;
- for (index = 0; index < nentries; index++) {
- len += sizeof(xlog_op_header_t); /* each region gets >= 1 */
- ticket->t_res_num_ophdrs++;
- len += reg[index].i_len;
- xlog_tic_add_region(ticket, reg[index].i_len, reg[index].i_type);
- }
- contwr = *start_lsn = 0;
+ *start_lsn = 0;
- if (ticket->t_curr_res < len) {
- xlog_print_tic_res(mp, ticket);
+ len = xlog_write_calc_vec_length(ticket, log_vector);
+ if (ticket->t_curr_res < len) {
+ xlog_print_tic_res(log->l_mp, ticket);
#ifdef DEBUG
- xlog_panic(
- "xfs_log_write: reservation ran out. Need to up reservation");
+ xlog_panic(
+ "xfs_log_write: reservation ran out. Need to up reservation");
#else
- /* Customer configurable panic */
- xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp,
- "xfs_log_write: reservation ran out. Need to up reservation");
- /* If we did not panic, shutdown the filesystem */
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ /* Customer configurable panic */
+ xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, log->l_mp,
+ "xfs_log_write: reservation ran out. Need to up reservation");
+
+ /* If we did not panic, shutdown the filesystem */
+ xfs_force_shutdown(log->l_mp, SHUTDOWN_CORRUPT_INCORE);
#endif
- } else
+ }
+
ticket->t_curr_res -= len;
- for (index = 0; index < nentries; ) {
- if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
- &contwr, &log_offset)))
- return error;
+ index = 0;
+ lv = log_vector;
+ vecp = lv->lv_iovecp;
+ while (lv && index < lv->lv_niovecs) {
+ void *ptr;
+ int log_offset;
- ASSERT(log_offset <= iclog->ic_size - 1);
- ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset);
+ error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
+ &contwr, &log_offset);
+ if (error)
+ return error;
- /* start_lsn is the first lsn written to. That's all we need. */
- if (! *start_lsn)
- *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ ASSERT(log_offset <= iclog->ic_size - 1);
+ ptr = iclog->ic_datap + log_offset;
- /* This loop writes out as many regions as can fit in the amount
- * of space which was allocated by xlog_state_get_iclog_space().
- */
- while (index < nentries) {
- ASSERT(reg[index].i_len % sizeof(__int32_t) == 0);
- ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0);
- start_rec_copy = 0;
-
- /* If first write for transaction, insert start record.
- * We can't be trying to commit if we are inited. We can't
- * have any "partial_copy" if we are inited.
- */
- if (ticket->t_flags & XLOG_TIC_INITED) {
- logop_head = (xlog_op_header_t *)ptr;
- logop_head->oh_tid = cpu_to_be32(ticket->t_tid);
- logop_head->oh_clientid = ticket->t_clientid;
- logop_head->oh_len = 0;
- logop_head->oh_flags = XLOG_START_TRANS;
- logop_head->oh_res2 = 0;
- ticket->t_flags &= ~XLOG_TIC_INITED; /* clear bit */
- record_cnt++;
-
- start_rec_copy = sizeof(xlog_op_header_t);
- xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy);
- }
+ /* start_lsn is the first lsn written to. That's all we need. */
+ if (!*start_lsn)
+ *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
- /* Copy log operation header directly into data section */
- logop_head = (xlog_op_header_t *)ptr;
- logop_head->oh_tid = cpu_to_be32(ticket->t_tid);
- logop_head->oh_clientid = ticket->t_clientid;
- logop_head->oh_res2 = 0;
+ /*
+ * This loop writes out as many regions as can fit in the amount
+ * of space which was allocated by xlog_state_get_iclog_space().
+ */
+ while (lv && index < lv->lv_niovecs) {
+ struct xfs_log_iovec *reg = &vecp[index];
+ struct xlog_op_header *ophdr;
+ int start_rec_copy;
+ int copy_len;
+ int copy_off;
+
+ ASSERT(reg->i_len % sizeof(__int32_t) == 0);
+ ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
+
+ start_rec_copy = xlog_write_start_rec(ptr, ticket);
+ if (start_rec_copy) {
+ record_cnt++;
+ xlog_write_adv_cnt(&ptr, &len, &log_offset,
+ start_rec_copy);
+ }
- /* header copied directly */
- xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t));
+ ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
+ if (!ophdr)
+ return XFS_ERROR(EIO);
- /* are we copying a commit or unmount record? */
- logop_head->oh_flags = flags;
+ xlog_write_adv_cnt(&ptr, &len, &log_offset,
+ sizeof(struct xlog_op_header));
+
+ len += xlog_write_setup_copy(ticket, ophdr,
+ iclog->ic_size-log_offset,
+ reg->i_len,
+ &copy_off, &copy_len,
+ &partial_copy,
+ &partial_copy_len);
+ xlog_verify_dest_ptr(log, ptr);
+
+ /* copy region */
+ ASSERT(copy_len >= 0);
+ memcpy(ptr, reg->i_addr + copy_off, copy_len);
+ xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
+
+ copy_len += start_rec_copy + sizeof(xlog_op_header_t);
+ record_cnt++;
+ data_cnt += contwr ? copy_len : 0;
+
+ error = xlog_write_copy_finish(log, iclog, flags,
+ &record_cnt, &data_cnt,
+ &partial_copy,
+ &partial_copy_len,
+ log_offset,
+ commit_iclog);
+ if (error)
+ return error;
- /*
- * We've seen logs corrupted with bad transaction client
- * ids. This makes sure that XFS doesn't generate them on.
- * Turn this into an EIO and shut down the filesystem.
- */
- switch (logop_head->oh_clientid) {
- case XFS_TRANSACTION:
- case XFS_VOLUME:
- case XFS_LOG:
- break;
- default:
- xfs_fs_cmn_err(CE_WARN, mp,
- "Bad XFS transaction clientid 0x%x in ticket 0x%p",
- logop_head->oh_clientid, ticket);
- return XFS_ERROR(EIO);
- }
+ /*
+ * if we had a partial copy, we need to get more iclog
+ * space but we don't want to increment the region
+ * index because there is still more in this region to
+ * write.
+ *
+ * If we completed writing this region, and we flushed
+ * the iclog (indicated by resetting of the record
+ * count), then we also need to get more log space. If
+ * this was the last record, though, we are done and
+ * can just return.
+ */
+ if (partial_copy)
+ break;
- /* Partial write last time? => (partial_copy != 0)
- * need_copy is the amount we'd like to copy if everything could
- * fit in the current memcpy.
- */
- need_copy = reg[index].i_len - partial_copy_len;
-
- copy_off = partial_copy_len;
- if (need_copy <= iclog->ic_size - log_offset) { /*complete write */
- copy_len = need_copy;
- logop_head->oh_len = cpu_to_be32(copy_len);
- if (partial_copy)
- logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
- partial_copy_len = partial_copy = 0;
- } else { /* partial write */
- copy_len = iclog->ic_size - log_offset;
- logop_head->oh_len = cpu_to_be32(copy_len);
- logop_head->oh_flags |= XLOG_CONTINUE_TRANS;
- if (partial_copy)
- logop_head->oh_flags |= XLOG_WAS_CONT_TRANS;
- partial_copy_len += copy_len;
- partial_copy++;
- len += sizeof(xlog_op_header_t); /* from splitting of region */
- /* account for new log op header */
- ticket->t_curr_res -= sizeof(xlog_op_header_t);
- ticket->t_res_num_ophdrs++;
- }
- xlog_verify_dest_ptr(log, ptr);
-
- /* copy region */
- ASSERT(copy_len >= 0);
- memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len);
- xlog_write_adv_cnt(ptr, len, log_offset, copy_len);
-
- /* make copy_len total bytes copied, including headers */
- copy_len += start_rec_copy + sizeof(xlog_op_header_t);
- record_cnt++;
- data_cnt += contwr ? copy_len : 0;
- if (partial_copy) { /* copied partial region */
- /* already marked WANT_SYNC by xlog_state_get_iclog_space */
- xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- record_cnt = data_cnt = 0;
- if ((error = xlog_state_release_iclog(log, iclog)))
- return error;
- break; /* don't increment index */
- } else { /* copied entire region */
- index++;
- partial_copy_len = partial_copy = 0;
-
- if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
- xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- record_cnt = data_cnt = 0;
- spin_lock(&log->l_icloglock);
- xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
- if (commit_iclog) {
- ASSERT(flags & XLOG_COMMIT_TRANS);
- *commit_iclog = iclog;
- } else if ((error = xlog_state_release_iclog(log, iclog)))
- return error;
- if (index == nentries)
- return 0; /* we are done */
- else
- break;
+ if (++index == lv->lv_niovecs) {
+ lv = lv->lv_next;
+ index = 0;
+ if (lv)
+ vecp = lv->lv_iovecp;
+ }
+ if (record_cnt == 0) {
+ if (!lv)
+ return 0;
+ break;
+ }
}
- } /* if (partial_copy) */
- } /* while (index < nentries) */
- } /* for (index = 0; index < nentries; ) */
- ASSERT(len == 0);
+ }
+
+ ASSERT(len == 0);
+
+ xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
+ if (!commit_iclog)
+ return xlog_state_release_iclog(log, iclog);
- xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- if (commit_iclog) {
ASSERT(flags & XLOG_COMMIT_TRANS);
*commit_iclog = iclog;
return 0;
- }
- return xlog_state_release_iclog(log, iclog);
-} /* xlog_write */
+}
/*****************************************************************************
@@ -3157,14 +3317,16 @@ xfs_log_ticket_get(
* Allocate and initialise a new log ticket.
*/
STATIC xlog_ticket_t *
-xlog_ticket_alloc(xlog_t *log,
- int unit_bytes,
- int cnt,
- char client,
- uint xflags)
+xlog_ticket_alloc(
+ struct log *log,
+ int unit_bytes,
+ int cnt,
+ char client,
+ uint xflags)
{
- xlog_ticket_t *tic;
+ struct xlog_ticket *tic;
uint num_headers;
+ int iclog_space;
tic = kmem_zone_zalloc(xfs_log_ticket_zone, KM_SLEEP|KM_MAYFAIL);
if (!tic)
@@ -3208,16 +3370,40 @@ xlog_ticket_alloc(xlog_t *log,
/* for start-rec */
unit_bytes += sizeof(xlog_op_header_t);
- /* for LR headers */
- num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
+ /*
+ * for LR headers - the space for data in an iclog is the size minus
+ * the space used for the headers. If we use the iclog size, then we
+ * undercalculate the number of headers required.
+ *
+ * Furthermore - the addition of op headers for split-recs might
+ * increase the space required enough to require more log and op
+ * headers, so take that into account too.
+ *
+ * IMPORTANT: This reservation makes the assumption that if this
+ * transaction is the first in an iclog and hence has the LR headers
+ * accounted to it, then the remaining space in the iclog is
+ * exclusively for this transaction. i.e. if the transaction is larger
+ * than the iclog, it will be the only thing in that iclog.
+ * Fundamentally, this means we must pass the entire log vector to
+ * xlog_write to guarantee this.
+ */
+ iclog_space = log->l_iclog_size - log->l_iclog_hsize;
+ num_headers = howmany(unit_bytes, iclog_space);
+
+ /* for split-recs - ophdrs added when data split over LRs */
+ unit_bytes += sizeof(xlog_op_header_t) * num_headers;
+
+ /* add extra header reservations if we overrun */
+ while (!num_headers ||
+ howmany(unit_bytes, iclog_space) > num_headers) {
+ unit_bytes += sizeof(xlog_op_header_t);
+ num_headers++;
+ }
unit_bytes += log->l_iclog_hsize * num_headers;
/* for commit-rec LR header - note: padding will subsume the ophdr */
unit_bytes += log->l_iclog_hsize;
- /* for split-recs - ophdrs added when data split over LRs */
- unit_bytes += sizeof(xlog_op_header_t) * num_headers;
-
/* for roundoff padding for transaction data and one for commit record */
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
log->l_mp->m_sb.sb_logsunit > 1) {
@@ -3233,13 +3419,13 @@ xlog_ticket_alloc(xlog_t *log,
tic->t_curr_res = unit_bytes;
tic->t_cnt = cnt;
tic->t_ocnt = cnt;
- tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff);
+ tic->t_tid = random32();
tic->t_clientid = client;
tic->t_flags = XLOG_TIC_INITED;
tic->t_trans_type = 0;
if (xflags & XFS_LOG_PERM_RESERV)
tic->t_flags |= XLOG_TIC_PERM_RESERV;
- sv_init(&(tic->t_wait), SV_DEFAULT, "logtick");
+ sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
xlog_tic_reset_res(tic);
@@ -3260,20 +3446,22 @@ xlog_ticket_alloc(xlog_t *log,
* part of the log in case we trash the log structure.
*/
void
-xlog_verify_dest_ptr(xlog_t *log,
- __psint_t ptr)
+xlog_verify_dest_ptr(
+ struct log *log,
+ char *ptr)
{
int i;
int good_ptr = 0;
- for (i=0; i < log->l_iclog_bufs; i++) {
- if (ptr >= (__psint_t)log->l_iclog_bak[i] &&
- ptr <= (__psint_t)log->l_iclog_bak[i]+log->l_iclog_size)
+ for (i = 0; i < log->l_iclog_bufs; i++) {
+ if (ptr >= log->l_iclog_bak[i] &&
+ ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
good_ptr++;
}
- if (! good_ptr)
+
+ if (!good_ptr)
xlog_panic("xlog_verify_dest_ptr: invalid ptr");
-} /* xlog_verify_dest_ptr */
+}
STATIC void
xlog_verify_grant_head(xlog_t *log, int equals)
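The reworked reservation in xlog_ticket_alloc() sizes LR headers against the usable iclog space (size minus header), then iterates because the split-rec op headers can themselves spill the transaction across another log record. A worked example with made-up sizes:

/* Worked example of the revised LR-header reservation arithmetic. */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define OPHDR_SIZE	12	/* stand-in for sizeof(xlog_op_header_t) */

int main(void)
{
	int iclog_size  = 32768;
	int iclog_hsize = 512;
	int unit_bytes  = 70000;	/* hypothetical transaction size */

	/* usable space per iclog is the size minus the LR header */
	int iclog_space = iclog_size - iclog_hsize;
	int num_headers = howmany(unit_bytes, iclog_space);	/* 3 */

	/* split-recs: one extra op header per LR crossing */
	unit_bytes += OPHDR_SIZE * num_headers;

	/* those op headers may push us over yet another LR boundary */
	while (!num_headers || howmany(unit_bytes, iclog_space) > num_headers) {
		unit_bytes += OPHDR_SIZE;
		num_headers++;
	}
	unit_bytes += iclog_hsize * num_headers;

	printf("num_headers=%d unit_bytes=%d\n", num_headers, unit_bytes);
	return 0;
}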
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 97a24c7..229d1f3 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -110,6 +110,12 @@ typedef struct xfs_log_iovec {
uint i_type; /* type of region */
} xfs_log_iovec_t;
+struct xfs_log_vec {
+ struct xfs_log_vec *lv_next; /* next lv in build list */
+ int lv_niovecs; /* number of iovecs in lv */
+ struct xfs_log_iovec *lv_iovecp; /* iovec array */
+};
+
/*
* Structure used to pass callback function and the function's argument
* to the log manager.
@@ -126,6 +132,13 @@ typedef struct xfs_log_callback {
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
+struct xfs_log_item;
+struct xfs_item_ops;
+
+void xfs_log_item_init(struct xfs_mount *mp,
+ struct xfs_log_item *item,
+ int type,
+ struct xfs_item_ops *ops);
xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
struct xlog_ticket *ticket,
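A sketch of how an item type might use the newly declared xfs_log_item_init() helper instead of open-coding the four assignments. The structures here are pared-down stand-ins, the helper body is mirrored from the hunk above so the sketch is self-contained, and XFS_LI_FAKE is a hypothetical item type, not one the kernel defines.

#include <stdlib.h>

struct xfs_ail;
struct xfs_item_ops;

struct xfs_mount {
	struct xfs_ail		*m_ail;
};

struct xfs_log_item {
	struct xfs_mount	*li_mountp;
	struct xfs_ail		*li_ailp;
	int			 li_type;
	struct xfs_item_ops	*li_ops;
};

/* mirror of the new helper */
static void
xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
		  int type, struct xfs_item_ops *ops)
{
	item->li_mountp = mp;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
}

#define XFS_LI_FAKE	0x1234	/* hypothetical item type */

struct fake_item {
	struct xfs_log_item	fi_item;	/* embedded log item */
	/* item-specific state would follow */
};

int main(void)
{
	struct xfs_mount mp = { .m_ail = NULL };
	struct fake_item *fip = calloc(1, sizeof(*fip));

	if (fip)
		xfs_log_item_init(&mp, &fip->fi_item, XFS_LI_FAKE, NULL);
	free(fip);
	return 0;
}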
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index fd02a18..9cf6951 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -396,9 +396,7 @@ typedef struct log {
struct xfs_buf_cancel **l_buf_cancel_table;
int l_iclog_hsize; /* size of iclog header */
int l_iclog_heads; /* # of iclog header sectors */
- uint l_sectbb_log; /* log2 of sector size in BBs */
- uint l_sectbb_mask; /* sector size (in BBs)
- * alignment mask */
+ uint l_sectBBsize; /* sector size in BBs (2^n) */
int l_iclog_size; /* size of log in bytes */
int l_iclog_size_log; /* log power size of log */
int l_iclog_bufs; /* number of iclog buffers */
@@ -449,6 +447,14 @@ extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
extern kmem_zone_t *xfs_log_ticket_zone;
+static inline void
+xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
+{
+ *ptr += bytes;
+ *len -= bytes;
+ *off += bytes;
+}
+
/*
* Unmount record type is used as a pseudo transaction type for the ticket.
* Its value must be outside the range of XFS_TRANS_* values.
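The macro-to-inline conversion of xlog_write_adv_cnt() makes the triple update explicit: callers now pass the write cursor by address instead of relying on the old macro's lvalue tricks. A small standalone usage sketch; note the void-pointer arithmetic is a GCC extension, as in the kernel:

#include <stdio.h>
#include <stddef.h>

static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;	/* void * arithmetic: GCC extension */
	*len -= bytes;
	*off += bytes;
}

int main(void)
{
	char buf[64];
	void *ptr = buf;
	int len = sizeof(buf), off = 0;

	/* advance the copy cursor by 16 bytes in one call */
	xlog_write_adv_cnt(&ptr, &len, &off, 16);
	printf("advanced=%td len=%d off=%d\n", (char *)ptr - buf, len, off);
	return 0;
}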
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 22e6efd..0de08e3 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -56,33 +56,61 @@ STATIC void xlog_recover_check_summary(xlog_t *);
#define xlog_recover_check_summary(log)
#endif
-
/*
* Sector aligned buffer routines for buffer create/read/write/access
*/
-#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs) \
- ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
- ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
-#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask)
+/*
+ * Verify the given count of basic blocks is a valid number of blocks
+ * to specify for an operation involving the given XFS log buffer.
+ * Returns nonzero if the count is valid, 0 otherwise.
+ */
+static inline int
+xlog_buf_bbcount_valid(
+ xlog_t *log,
+ int bbcount)
+{
+ return bbcount > 0 && bbcount <= log->l_logBBsize;
+}
+
+/*
+ * Allocate a buffer to hold log data. The buffer needs to be able
+ * to map to a range of nbblks basic blocks at any valid (basic
+ * block) offset within the log.
+ */
STATIC xfs_buf_t *
xlog_get_bp(
xlog_t *log,
int nbblks)
{
- if (nbblks <= 0 || nbblks > log->l_logBBsize) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
- XFS_ERROR_REPORT("xlog_get_bp(1)",
- XFS_ERRLEVEL_HIGH, log->l_mp);
+ if (!xlog_buf_bbcount_valid(log, nbblks)) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ nbblks);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return NULL;
}
- if (log->l_sectbb_log) {
- if (nbblks > 1)
- nbblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
- nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
- }
+ /*
+ * We do log I/O in units of log sectors (a power-of-2
+ * multiple of the basic block size), so we round up the
+ * requested size to accommodate the basic blocks required
+ * for complete log sectors.
+ *
+ * In addition, the buffer may be used for a non-sector-
+ * aligned block offset, in which case an I/O of the
+ * requested size could extend beyond the end of the
+ * buffer. If the requested size is only 1 basic block it
+ * will never straddle a sector boundary, so this won't be
+ * an issue. Nor will this be a problem if the log I/O is
+ * done in basic blocks (sector size 1). But otherwise we
+ * extend the buffer by one extra log sector to ensure
+ * there's space to accommodate this possibility.
+ */
+ if (nbblks > 1 && log->l_sectBBsize > 1)
+ nbblks += log->l_sectBBsize;
+ nbblks = round_up(nbblks, log->l_sectBBsize);
+
return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}
@@ -93,6 +121,10 @@ xlog_put_bp(
xfs_buf_free(bp);
}
+/*
+ * Return the address of the start of the given block number's data
+ * in a log buffer. The buffer covers a log sector-aligned region.
+ */
STATIC xfs_caddr_t
xlog_align(
xlog_t *log,
@@ -100,14 +132,14 @@ xlog_align(
int nbblks,
xfs_buf_t *bp)
{
+ xfs_daddr_t offset;
xfs_caddr_t ptr;
- if (!log->l_sectbb_log)
- return XFS_BUF_PTR(bp);
+ offset = blk_no & ((xfs_daddr_t) log->l_sectBBsize - 1);
+ ptr = XFS_BUF_PTR(bp) + BBTOB(offset);
+
+ ASSERT(ptr + BBTOB(nbblks) <= XFS_BUF_PTR(bp) + XFS_BUF_SIZE(bp));
- ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
- ASSERT(XFS_BUF_SIZE(bp) >=
- BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
return ptr;
}
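xlog_align() now derives the intra-buffer offset directly from the sector size: reads are rounded down to a sector boundary, so block blk_no's data starts BBTOB(blk_no mod sector-size) bytes into the buffer. A small worked example with illustrative numbers:

#include <stdio.h>

#define BBSHIFT		9
#define BBTOB(bbs)	((bbs) << BBSHIFT)

int main(void)
{
	long long blk_no = 21;
	long long sectBBsize = 8;	/* power of two, as in the log code */

	long long offset = blk_no & (sectBBsize - 1);	/* 21 % 8 = 5 */
	printf("data starts %lld bytes into the buffer\n", BBTOB(offset));
	return 0;
}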
@@ -124,21 +156,18 @@ xlog_bread_noalign(
{
int error;
- if (nbblks <= 0 || nbblks > log->l_logBBsize) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
- XFS_ERROR_REPORT("xlog_bread(1)",
- XFS_ERRLEVEL_HIGH, log->l_mp);
+ if (!xlog_buf_bbcount_valid(log, nbblks)) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ nbblks);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
}
- if (log->l_sectbb_log) {
- blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
- nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
- }
+ blk_no = round_down(blk_no, log->l_sectBBsize);
+ nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
- ASSERT(bp);
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_READ(bp);
@@ -186,17 +215,15 @@ xlog_bwrite(
{
int error;
- if (nbblks <= 0 || nbblks > log->l_logBBsize) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
- XFS_ERROR_REPORT("xlog_bwrite(1)",
- XFS_ERRLEVEL_HIGH, log->l_mp);
+ if (!xlog_buf_bbcount_valid(log, nbblks)) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ nbblks);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
}
- if (log->l_sectbb_log) {
- blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
- nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
- }
+ blk_no = round_down(blk_no, log->l_sectBBsize);
+ nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
@@ -327,39 +354,38 @@ xlog_find_cycle_start(
{
xfs_caddr_t offset;
xfs_daddr_t mid_blk;
+ xfs_daddr_t end_blk;
uint mid_cycle;
int error;
- mid_blk = BLK_AVG(first_blk, *last_blk);
- while (mid_blk != first_blk && mid_blk != *last_blk) {
+ end_blk = *last_blk;
+ mid_blk = BLK_AVG(first_blk, end_blk);
+ while (mid_blk != first_blk && mid_blk != end_blk) {
error = xlog_bread(log, mid_blk, 1, bp, &offset);
if (error)
return error;
mid_cycle = xlog_get_cycle(offset);
- if (mid_cycle == cycle) {
- *last_blk = mid_blk;
- /* last_half_cycle == mid_cycle */
- } else {
- first_blk = mid_blk;
- /* first_half_cycle == mid_cycle */
- }
- mid_blk = BLK_AVG(first_blk, *last_blk);
+ if (mid_cycle == cycle)
+ end_blk = mid_blk; /* last_half_cycle == mid_cycle */
+ else
+ first_blk = mid_blk; /* first_half_cycle == mid_cycle */
+ mid_blk = BLK_AVG(first_blk, end_blk);
}
- ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
- (mid_blk == *last_blk && mid_blk-1 == first_blk));
+ ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
+ (mid_blk == end_blk && mid_blk-1 == first_blk));
+
+ *last_blk = end_blk;
return 0;
}
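The loop above is a binary search over on-disk cycle numbers, with end_blk now a local so *last_blk is written exactly once at the end. A userspace model against a synthetic cycle array, preserving the invariant that the cycle changes somewhere between first_blk and end_blk:

#include <stdio.h>

#define BLK_AVG(a, b)	(((a) + (b)) >> 1)

int main(void)
{
	/* cycles as they might appear on disk: x+1 ... | x ... */
	int cycles[] = { 5, 5, 5, 5, 4, 4, 4, 4, 4, 4 };
	int cycle = 4;
	long long first_blk = 0, end_blk = 9, mid_blk;

	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		if (cycles[mid_blk] == cycle)
			end_blk = mid_blk;	/* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk;	/* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	printf("new head candidate at block %lld\n", end_blk);	/* 4 */
	return 0;
}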
/*
- * Check that the range of blocks does not contain the cycle number
- * given. The scan needs to occur from front to back and the ptr into the
- * region must be updated since a later routine will need to perform another
- * test. If the region is completely good, we end up returning the same
- * last block number.
- *
- * Set blkno to -1 if we encounter no errors. This is an invalid block number
- * since we don't ever expect logs to get this large.
+ * Check that a range of blocks does not contain stop_on_cycle_no.
+ * Fill in *new_blk with the block offset where such a block is
+ * found, or with -1 (an invalid block number) if there is no such
+ * block in the range. The scan needs to occur from front to back
+ * and the pointer into the region must be updated since a later
+ * routine will need to perform another test.
*/
STATIC int
xlog_find_verify_cycle(
@@ -376,12 +402,16 @@ xlog_find_verify_cycle(
xfs_caddr_t buf = NULL;
int error = 0;
+ /*
+ * Greedily allocate a buffer big enough to handle the full
+ * range of basic blocks we'll be examining. If that fails,
+ * try a smaller size. We need to be able to read at least
+ * a log sector, or we're out of luck.
+ */
bufblks = 1 << ffs(nbblks);
-
while (!(bp = xlog_get_bp(log, bufblks))) {
- /* can't get enough memory to do everything in one big buffer */
bufblks >>= 1;
- if (bufblks <= log->l_sectbb_log)
+ if (bufblks < log->l_sectBBsize)
return ENOMEM;
}
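The allocate-then-halve idiom above (repeated in xlog_write_log_records() below) greedily requests the whole range and retreats on allocation failure, giving up once the buffer would be smaller than one log sector. Sketched here with malloc() standing in for xlog_get_bp(); note that 1 << ffs(n) doubles an exact power of two, mirroring the kernel expression:

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

int main(void)
{
	int nbblks = 2048, sectBBsize = 8;
	int bufblks = 1 << ffs(nbblks);		/* 4096 for nbblks = 2048 */
	void *bp;

	while (!(bp = malloc((size_t)bufblks * 512))) {
		bufblks >>= 1;
		if (bufblks < sectBBsize) {
			fprintf(stderr, "ENOMEM\n");
			return 1;
		}
	}
	printf("got %d basic blocks\n", bufblks);
	free(bp);
	return 0;
}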
@@ -629,7 +659,7 @@ xlog_find_head(
* In this case we want to find the first block with cycle
* number matching last_half_cycle. We expect the log to be
* some variation on
- * x + 1 ... | x ...
+ * x + 1 ... | x ... | x
* The first block with cycle number x (last_half_cycle) will
* be where the new head belongs. First we do a binary search
* for the first occurrence of last_half_cycle. The binary
@@ -639,11 +669,13 @@ xlog_find_head(
* the log, then we look for occurrences of last_half_cycle - 1
* at the end of the log. The cases we're looking for look
* like
- * x + 1 ... | x | x + 1 | x ...
- * ^ binary search stopped here
+ * v binary search stopped here
+ * x + 1 ... | x | x + 1 | x ... | x
+ * ^ but we want to locate this spot
* or
- * x + 1 ... | x ... | x - 1 | x
* <---------> less than scan distance
+ * x + 1 ... | x ... | x - 1 | x
+ * ^ we want to locate this spot
*/
stop_on_cycle = last_half_cycle;
if ((error = xlog_find_cycle_start(log, bp, first_blk,
@@ -699,16 +731,16 @@ xlog_find_head(
* certainly not the head of the log. By searching for
* last_half_cycle-1 we accomplish that.
*/
- start_blk = log_bbnum - num_scan_bblks + head_blk;
ASSERT(head_blk <= INT_MAX &&
- (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
+ (xfs_daddr_t) num_scan_bblks >= head_blk);
+ start_blk = log_bbnum - (num_scan_bblks - head_blk);
if ((error = xlog_find_verify_cycle(log, start_blk,
num_scan_bblks - (int)head_blk,
(stop_on_cycle - 1), &new_blk)))
goto bp_err;
if (new_blk != -1) {
head_blk = new_blk;
- goto bad_blk;
+ goto validate_head;
}
/*
@@ -726,7 +758,7 @@ xlog_find_head(
head_blk = new_blk;
}
- bad_blk:
+validate_head:
/*
* Now we need to make sure head_blk is not pointing to a block in
* the middle of a log record.
@@ -748,7 +780,7 @@ xlog_find_head(
if ((error = xlog_find_verify_log_record(log, start_blk,
&head_blk, 0)) == -1) {
/* We hit the beginning of the log during our search */
- start_blk = log_bbnum - num_scan_bblks + head_blk;
+ start_blk = log_bbnum - (num_scan_bblks - head_blk);
new_blk = log_bbnum;
ASSERT(start_blk <= INT_MAX &&
(xfs_daddr_t) log_bbnum-start_blk >= 0);
@@ -833,12 +865,12 @@ xlog_find_tail(
if (*head_blk == 0) { /* special case */
error = xlog_bread(log, 0, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
if (xlog_get_cycle(offset) == 0) {
*tail_blk = 0;
/* leave all other log inited values alone */
- goto exit;
+ goto done;
}
}
@@ -849,7 +881,7 @@ xlog_find_tail(
for (i = (int)(*head_blk) - 1; i >= 0; i--) {
error = xlog_bread(log, i, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
found = 1;
@@ -866,7 +898,7 @@ xlog_find_tail(
for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
error = xlog_bread(log, i, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
if (XLOG_HEADER_MAGIC_NUM ==
be32_to_cpu(*(__be32 *)offset)) {
@@ -941,7 +973,7 @@ xlog_find_tail(
umount_data_blk = (i + hblks) % log->l_logBBsize;
error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
op_head = (xlog_op_header_t *)offset;
if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
@@ -987,12 +1019,10 @@ xlog_find_tail(
* But... if the -device- itself is readonly, just skip this.
* We can't recover this device anyway, so it won't matter.
*/
- if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
+ if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
error = xlog_clear_stale_blocks(log, tail_lsn);
- }
-bread_err:
-exit:
+done:
xlog_put_bp(bp);
if (error)
@@ -1152,16 +1182,22 @@ xlog_write_log_records(
xfs_caddr_t offset;
xfs_buf_t *bp;
int balign, ealign;
- int sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
+ int sectbb = log->l_sectBBsize;
int end_block = start_block + blocks;
int bufblks;
int error = 0;
int i, j = 0;
+ /*
+ * Greedily allocate a buffer big enough to handle the full
+ * range of basic blocks to be written. If that fails, try
+ * a smaller size. We need to be able to write at least a
+ * log sector, or we're out of luck.
+ */
bufblks = 1 << ffs(blocks);
while (!(bp = xlog_get_bp(log, bufblks))) {
bufblks >>= 1;
- if (bufblks <= log->l_sectbb_log)
+ if (bufblks < sectbb)
return ENOMEM;
}
@@ -1169,7 +1205,7 @@ xlog_write_log_records(
* the buffer in the starting sector not covered by the first
* write below.
*/
- balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
+ balign = round_down(start_block, sectbb);
if (balign != start_block) {
error = xlog_bread_noalign(log, start_block, 1, bp);
if (error)
@@ -1188,7 +1224,7 @@ xlog_write_log_records(
* the buffer in the final sector not covered by the write.
* If this is the same sector as the above read, skip it.
*/
- ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
+ ealign = round_down(end_block, sectbb);
if (j == 0 && (start_block + endcount > ealign)) {
offset = XFS_BUF_PTR(bp);
balign = BBTOB(ealign - start_block);
@@ -1408,6 +1444,7 @@ xlog_recover_add_item(
STATIC int
xlog_recover_add_to_cont_trans(
+ struct log *log,
xlog_recover_t *trans,
xfs_caddr_t dp,
int len)
@@ -1434,6 +1471,7 @@ xlog_recover_add_to_cont_trans(
memcpy(&ptr[old_len], dp, len); /* d, s, l */
item->ri_buf[item->ri_cnt-1].i_len += len;
item->ri_buf[item->ri_cnt-1].i_addr = ptr;
+ trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
return 0;
}
@@ -1452,6 +1490,7 @@ xlog_recover_add_to_cont_trans(
*/
STATIC int
xlog_recover_add_to_trans(
+ struct log *log,
xlog_recover_t *trans,
xfs_caddr_t dp,
int len)
@@ -1510,6 +1549,7 @@ xlog_recover_add_to_trans(
item->ri_buf[item->ri_cnt].i_addr = ptr;
item->ri_buf[item->ri_cnt].i_len = len;
item->ri_cnt++;
+ trace_xfs_log_recover_item_add(log, trans, item, 0);
return 0;
}
@@ -1521,7 +1561,9 @@ xlog_recover_add_to_trans(
*/
STATIC int
xlog_recover_reorder_trans(
- xlog_recover_t *trans)
+ struct log *log,
+ xlog_recover_t *trans,
+ int pass)
{
xlog_recover_item_t *item, *n;
LIST_HEAD(sort_list);
@@ -1535,6 +1577,8 @@ xlog_recover_reorder_trans(
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) {
+ trace_xfs_log_recover_item_reorder_head(log,
+ trans, item, pass);
list_move(&item->ri_list, &trans->r_itemq);
break;
}
@@ -1543,6 +1587,8 @@ xlog_recover_reorder_trans(
case XFS_LI_QUOTAOFF:
case XFS_LI_EFD:
case XFS_LI_EFI:
+ trace_xfs_log_recover_item_reorder_tail(log,
+ trans, item, pass);
list_move_tail(&item->ri_list, &trans->r_itemq);
break;
default:
@@ -1592,8 +1638,10 @@ xlog_recover_do_buffer_pass1(
/*
* If this isn't a cancel buffer item, then just return.
*/
- if (!(flags & XFS_BLI_CANCEL))
+ if (!(flags & XFS_BLI_CANCEL)) {
+ trace_xfs_log_recover_buf_not_cancel(log, buf_f);
return;
+ }
/*
* Insert an xfs_buf_cancel record into the hash table of
@@ -1627,6 +1675,7 @@ xlog_recover_do_buffer_pass1(
while (nextp != NULL) {
if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
nextp->bc_refcount++;
+ trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
return;
}
prevp = nextp;
@@ -1640,6 +1689,7 @@ xlog_recover_do_buffer_pass1(
bcp->bc_refcount = 1;
bcp->bc_next = NULL;
prevp->bc_next = bcp;
+ trace_xfs_log_recover_buf_cancel_add(log, buf_f);
}
/*
@@ -1779,6 +1829,8 @@ xlog_recover_do_inode_buffer(
unsigned int *data_map = NULL;
unsigned int map_size = 0;
+ trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
+
switch (buf_f->blf_type) {
case XFS_LI_BUF:
data_map = buf_f->blf_data_map;
@@ -1874,6 +1926,7 @@ xlog_recover_do_inode_buffer(
/*ARGSUSED*/
STATIC void
xlog_recover_do_reg_buffer(
+ struct xfs_mount *mp,
xlog_recover_item_t *item,
xfs_buf_t *bp,
xfs_buf_log_format_t *buf_f)
@@ -1885,6 +1938,8 @@ xlog_recover_do_reg_buffer(
unsigned int map_size = 0;
int error;
+ trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
+
switch (buf_f->blf_type) {
case XFS_LI_BUF:
data_map = buf_f->blf_data_map;
@@ -2083,6 +2138,8 @@ xlog_recover_do_dquot_buffer(
{
uint type;
+ trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
+
/*
* Filesystems are required to send in quota flags at mount time.
*/
@@ -2103,7 +2160,7 @@ xlog_recover_do_dquot_buffer(
if (log->l_quotaoffs_flag & type)
return;
- xlog_recover_do_reg_buffer(item, bp, buf_f);
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}
/*
@@ -2164,9 +2221,11 @@ xlog_recover_do_buffer_trans(
*/
cancel = xlog_recover_do_buffer_pass2(log, buf_f);
if (cancel) {
+ trace_xfs_log_recover_buf_cancel(log, buf_f);
return 0;
}
}
+ trace_xfs_log_recover_buf_recover(log, buf_f);
switch (buf_f->blf_type) {
case XFS_LI_BUF:
blkno = buf_f->blf_blkno;
@@ -2204,7 +2263,7 @@ xlog_recover_do_buffer_trans(
(XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
} else {
- xlog_recover_do_reg_buffer(item, bp, buf_f);
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}
if (error)
return XFS_ERROR(error);
@@ -2284,8 +2343,10 @@ xlog_recover_do_inode_trans(
if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
in_f->ilf_len, 0)) {
error = 0;
+ trace_xfs_log_recover_inode_cancel(log, in_f);
goto error;
}
+ trace_xfs_log_recover_inode_recover(log, in_f);
bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
XBF_LOCK);
@@ -2337,6 +2398,7 @@ xlog_recover_do_inode_trans(
/* do nothing */
} else {
xfs_buf_relse(bp);
+ trace_xfs_log_recover_inode_skip(log, in_f);
error = 0;
goto error;
}
@@ -2758,11 +2820,12 @@ xlog_recover_do_trans(
int error = 0;
xlog_recover_item_t *item;
- error = xlog_recover_reorder_trans(trans);
+ error = xlog_recover_reorder_trans(log, trans, pass);
if (error)
return error;
list_for_each_entry(item, &trans->r_itemq, ri_list) {
+ trace_xfs_log_recover_item_recover(log, trans, item, pass);
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
error = xlog_recover_do_buffer_trans(log, item, pass);
@@ -2919,8 +2982,9 @@ xlog_recover_process_data(
error = xlog_recover_unmount_trans(trans);
break;
case XLOG_WAS_CONT_TRANS:
- error = xlog_recover_add_to_cont_trans(trans,
- dp, be32_to_cpu(ohead->oh_len));
+ error = xlog_recover_add_to_cont_trans(log,
+ trans, dp,
+ be32_to_cpu(ohead->oh_len));
break;
case XLOG_START_TRANS:
xlog_warn(
@@ -2930,7 +2994,7 @@ xlog_recover_process_data(
break;
case 0:
case XLOG_CONTINUE_TRANS:
- error = xlog_recover_add_to_trans(trans,
+ error = xlog_recover_add_to_trans(log, trans,
dp, be32_to_cpu(ohead->oh_len));
break;
default:
@@ -3331,42 +3395,6 @@ xlog_pack_data(
}
}
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
-STATIC void
-xlog_unpack_data_checksum(
- xlog_rec_header_t *rhead,
- xfs_caddr_t dp,
- xlog_t *log)
-{
- __be32 *up = (__be32 *)dp;
- uint chksum = 0;
- int i;
-
- /* divide length by 4 to get # words */
- for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
- chksum ^= be32_to_cpu(*up);
- up++;
- }
- if (chksum != be32_to_cpu(rhead->h_chksum)) {
- if (rhead->h_chksum ||
- ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
- cmn_err(CE_DEBUG,
- "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
- be32_to_cpu(rhead->h_chksum), chksum);
- cmn_err(CE_DEBUG,
-"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
- cmn_err(CE_DEBUG,
- "XFS: LogR this is a LogV2 filesystem\n");
- }
- log->l_flags |= XLOG_CHKSUM_MISMATCH;
- }
- }
-}
-#else
-#define xlog_unpack_data_checksum(rhead, dp, log)
-#endif
-
STATIC void
xlog_unpack_data(
xlog_rec_header_t *rhead,
@@ -3390,8 +3418,6 @@ xlog_unpack_data(
dp += BBSIZE;
}
}
-
- xlog_unpack_data_checksum(rhead, dp, log);
}
STATIC int
@@ -3490,7 +3516,7 @@ xlog_do_recovery_pass(
hblks = 1;
}
} else {
- ASSERT(log->l_sectbb_log == 0);
+ ASSERT(log->l_sectBBsize == 1);
hblks = 1;
hbp = xlog_get_bp(log, 1);
h_size = XLOG_BIG_RECORD_BSIZE;
@@ -3946,10 +3972,6 @@ xlog_recover_check_summary(
xfs_agf_t *agfp;
xfs_buf_t *agfbp;
xfs_buf_t *agibp;
- xfs_buf_t *sbbp;
-#ifdef XFS_LOUD_RECOVERY
- xfs_sb_t *sbp;
-#endif
xfs_agnumber_t agno;
__uint64_t freeblks;
__uint64_t itotal;
@@ -3984,30 +4006,5 @@ xlog_recover_check_summary(
xfs_buf_relse(agibp);
}
}
-
- sbbp = xfs_getsb(mp, 0);
-#ifdef XFS_LOUD_RECOVERY
- sbp = &mp->m_sb;
- xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
- cmn_err(CE_NOTE,
- "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
- sbp->sb_icount, itotal);
- cmn_err(CE_NOTE,
- "xlog_recover_check_summary: sb_ifree %Lu itotal %Lu",
- sbp->sb_ifree, ifree);
- cmn_err(CE_NOTE,
- "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
- sbp->sb_fdblocks, freeblks);
-#if 0
- /*
- * This is turned off until I account for the allocation
- * btree blocks which live in free space.
- */
- ASSERT(sbp->sb_icount == itotal);
- ASSERT(sbp->sb_ifree == ifree);
- ASSERT(sbp->sb_fdblocks == freeblks);
-#endif
-#endif
- xfs_buf_relse(sbbp);
}
#endif /* DEBUG */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index e79b56b..d7bf38c 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1405,13 +1405,6 @@ xfs_mountfs(
xfs_qm_mount_quotas(mp);
}
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- if (XFS_IS_QUOTA_ON(mp))
- xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
- else
- xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
-#endif
-
/*
* Now we are mounted, reserve a small amount of unused space for
* privileged transactions. This is needed so that transaction
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 4fa0bc7..9ff48a16 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -259,6 +259,7 @@ typedef struct xfs_mount {
wait_queue_head_t m_wait_single_sync_task;
__int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */
+ struct list_head m_mplist; /* inode shrinker mount list */
} xfs_mount_t;
/*
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index fdcab3f..e0e64b1 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -201,9 +201,6 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
#define XFS_QMOPT_DQSUSER 0x0000020 /* don't cache super users dquot */
#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
-#define XFS_QMOPT_QUOTAOFF 0x0000080 /* quotas are being turned off */
-#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */
-#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */
#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f73e358..be578ec 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -45,23 +45,12 @@
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
-
-STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *);
-STATIC uint xfs_trans_count_vecs(xfs_trans_t *);
-STATIC void xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *);
-STATIC void xfs_trans_uncommit(xfs_trans_t *, uint);
-STATIC void xfs_trans_committed(xfs_trans_t *, int);
-STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
-STATIC void xfs_trans_free(xfs_trans_t *);
-
kmem_zone_t *xfs_trans_zone;
-
/*
* Reservation functions here avoid a huge stack in xfs_trans_init
* due to register overflow from temporaries in the calculations.
*/
-
STATIC uint
xfs_calc_write_reservation(xfs_mount_t *mp)
{
@@ -261,6 +250,19 @@ _xfs_trans_alloc(
}
/*
+ * Free the transaction structure. If there is more clean up
+ * to do when the structure is freed, add it here.
+ */
+STATIC void
+xfs_trans_free(
+ xfs_trans_t *tp)
+{
+ atomic_dec(&tp->t_mountp->m_active_trans);
+ xfs_trans_free_dqinfo(tp);
+ kmem_zone_free(xfs_trans_zone, tp);
+}
+
+/*
* This is called to create a new transaction which will share the
* permanent log reservation of the given transaction. The remaining
* unused block and rt extent reservations are also inherited. This
@@ -764,94 +766,278 @@ xfs_trans_unreserve_and_mod_sb(
}
}
+/*
+ * Total up the number of log iovecs needed to commit this
+ * transaction. The transaction itself needs one for the
+ * transaction header. Ask each dirty item in turn how many
+ * it needs to get the total.
+ */
+static uint
+xfs_trans_count_vecs(
+ struct xfs_trans *tp)
+{
+ int nvecs;
+ xfs_log_item_desc_t *lidp;
+
+ nvecs = 1;
+ lidp = xfs_trans_first_item(tp);
+ ASSERT(lidp != NULL);
+
+ /* In the non-debug case we need to start bailing out if we
+ * didn't find a log_item here, return zero and let trans_commit
+ * deal with it.
+ */
+ if (lidp == NULL)
+ return 0;
+
+ while (lidp != NULL) {
+ /*
+ * Skip items which aren't dirty in this transaction.
+ */
+ if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+ lidp = xfs_trans_next_item(tp, lidp);
+ continue;
+ }
+ lidp->lid_size = IOP_SIZE(lidp->lid_item);
+ nvecs += lidp->lid_size;
+ lidp = xfs_trans_next_item(tp, lidp);
+ }
+
+ return nvecs;
+}
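Concretely, xfs_trans_count_vecs() returns one slot for the transaction header plus each dirty item's IOP_SIZE(); clean items contribute nothing. A reduced model with a plain array in place of the item list and made-up sizes:

/* Model of the vector count: header + dirty items' sizes. */
#include <stdio.h>

#define XFS_LID_DIRTY	0x1

struct lid { int flags; int size; };

int main(void)
{
	struct lid items[] = {
		{ XFS_LID_DIRTY, 2 },	/* e.g. inode core + data fork */
		{ 0,             3 },	/* clean: contributes nothing */
		{ XFS_LID_DIRTY, 1 },
	};
	int nvecs = 1;			/* transaction header */

	for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		if (items[i].flags & XFS_LID_DIRTY)
			nvecs += items[i].size;

	printf("need %d iovecs\n", nvecs);	/* 4 */
	return 0;
}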
/*
- * xfs_trans_commit
+ * Fill in the vector with pointers to data to be logged
+ * by this transaction. The transaction header takes
+ * the first vector, and then each dirty item takes the
+ * number of vectors it indicated it needed in xfs_trans_count_vecs().
*
- * Commit the given transaction to the log a/synchronously.
+ * As each item fills in the entries it needs, also pin the item
+ * so that it cannot be flushed out until the log write completes.
+ */
+static void
+xfs_trans_fill_vecs(
+ struct xfs_trans *tp,
+ struct xfs_log_iovec *log_vector)
+{
+ xfs_log_item_desc_t *lidp;
+ struct xfs_log_iovec *vecp;
+ uint nitems;
+
+ /*
+ * Skip over the entry for the transaction header, we'll
+ * fill that in at the end.
+ */
+ vecp = log_vector + 1;
+
+ nitems = 0;
+ lidp = xfs_trans_first_item(tp);
+ ASSERT(lidp);
+ while (lidp) {
+ /* Skip items which aren't dirty in this transaction. */
+ if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+ lidp = xfs_trans_next_item(tp, lidp);
+ continue;
+ }
+
+ /*
+ * The item may be marked dirty but log nothing; this is used so
+ * the item still gets its callbacks when the transaction commits.
+ */
+ if (lidp->lid_size)
+ nitems++;
+ IOP_FORMAT(lidp->lid_item, vecp);
+ vecp += lidp->lid_size;
+ IOP_PIN(lidp->lid_item);
+ lidp = xfs_trans_next_item(tp, lidp);
+ }
+
+ /*
+ * Now that we've counted the number of items in this transaction, fill
+ * in the transaction header. Note that the transaction header does not
+ * have a log item.
+ */
+ tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
+ tp->t_header.th_type = tp->t_type;
+ tp->t_header.th_num_items = nitems;
+ log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
+ log_vector->i_len = sizeof(xfs_trans_header_t);
+ log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
+}
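Together the two helpers implement a count-then-fill pass: size the iovec array with xfs_trans_count_vecs(), allocate it, then let xfs_trans_fill_vecs() populate slots 1..n while slot 0 is reserved for the transaction header (which the kernel fills last). A reduced standalone sketch of that shape, with hypothetical payloads:

#include <stdio.h>
#include <stdlib.h>

struct iovec_s { void *addr; int len; };

int main(void)
{
	int item_lens[] = { 24, 48 };	/* dirty items' payload sizes */
	int nvecs = 1;			/* slot 0: transaction header */
	char header[16];

	for (int i = 0; i < 2; i++)	/* count pass: one vec per item here */
		nvecs++;

	struct iovec_s *vec = calloc(nvecs, sizeof(*vec));
	if (!vec)
		return 1;

	/* fill pass: items take slots 1..n, header takes slot 0 last */
	for (int i = 0; i < 2; i++) {
		vec[i + 1].addr = &item_lens[i];	/* stand-in payload */
		vec[i + 1].len = item_lens[i];
	}
	vec[0].addr = header;
	vec[0].len = sizeof(header);

	printf("built %d vectors\n", nvecs);
	free(vec);
	return 0;
}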
+
+/*
+ * The committed item processing consists of calling the committed routine of
+ * each logged item, updating the item's position in the AIL if necessary, and
+ * unpinning each item. If the committed routine returns -1, then do nothing
+ * further with the item because it may have been freed.
*
- * XFS disk error handling mechanism is not based on a typical
- * transaction abort mechanism. Logically after the filesystem
- * gets marked 'SHUTDOWN', we can't let any new transactions
- * be durable - ie. committed to disk - because some metadata might
- * be inconsistent. In such cases, this returns an error, and the
- * caller may assume that all locked objects joined to the transaction
- * have already been unlocked as if the commit had succeeded.
- * Do not reference the transaction structure after this call.
+ * Since items are unlocked when they are copied to the incore log, it is
+ * possible for two transactions to be completing and manipulating the same
+ * item simultaneously. The AIL lock will protect the lsn field of each item.
+ * The value of this field can never go backwards.
+ *
+ * We unpin the items after repositioning them in the AIL, because otherwise
+ * they could be immediately flushed and we'd have to race with the flusher
+ * trying to pull the item from the AIL as we add it.
*/
- /*ARGSUSED*/
-int
-_xfs_trans_commit(
- xfs_trans_t *tp,
- uint flags,
- int *log_flushed)
+static void
+xfs_trans_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t commit_lsn,
+ int aborted)
{
- xfs_log_iovec_t *log_vector;
- int nvec;
- xfs_mount_t *mp;
- xfs_lsn_t commit_lsn;
- /* REFERENCED */
- int error;
- int log_flags;
- int sync;
-#define XFS_TRANS_LOGVEC_COUNT 16
- xfs_log_iovec_t log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
- struct xlog_in_core *commit_iclog;
- int shutdown;
+ xfs_lsn_t item_lsn;
+ struct xfs_ail *ailp;
- commit_lsn = -1;
+ if (aborted)
+ lip->li_flags |= XFS_LI_ABORTED;
+ item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+ /* If the committed routine returns -1, item has been freed. */
+ if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+ return;
/*
- * Determine whether this commit is releasing a permanent
- * log reservation or not.
+ * If the returned lsn is greater than what it contained before, update
+ * the location of the item in the AIL. If it is not, then do nothing.
+ * Items can never move backwards in the AIL.
+ *
+ * While the new lsn should usually be greater, it is possible that a
+ * later transaction completing simultaneously with an earlier one
+ * using the same item could complete first with a higher lsn. This
+ * would cause the earlier transaction to fail the test below.
*/
- if (flags & XFS_TRANS_RELEASE_LOG_RES) {
- ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
- log_flags = XFS_LOG_REL_PERM_RESERV;
+ ailp = lip->li_ailp;
+ spin_lock(&ailp->xa_lock);
+ if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
+ /*
+ * This will set the item's lsn to item_lsn and update the
+ * position of the item in the AIL.
+ *
+ * xfs_trans_ail_update() drops the AIL lock.
+ */
+ xfs_trans_ail_update(ailp, lip, item_lsn);
} else {
- log_flags = 0;
+ spin_unlock(&ailp->xa_lock);
}
- mp = tp->t_mountp;
/*
- * If there is nothing to be logged by the transaction,
- * then unlock all of the items associated with the
- * transaction and free the transaction structure.
- * Also make sure to return any reserved blocks to
- * the free pool.
+ * Now that we've repositioned the item in the AIL, unpin it so it can
+ * be flushed. Pass information about buffer stale state down from the
+ * log item flags; if anyone else stales the buffer we do not want to
+ * pay any attention to it.
*/
-shut_us_down:
- shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0;
- if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) {
- xfs_trans_unreserve_and_mod_sb(tp);
+ IOP_UNPIN(lip);
+}
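
The lsn handling above enforces a forward-only ordering under the AIL lock:
the check and the update happen atomically, and a lower lsn simply loses. A
small stand-alone sketch of the same rule, using a pthread mutex in place of
xa_lock (illustrative only, all names invented):

/* Illustrative sketch, not kernel code. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t lsn_t;

struct ail_item {
    pthread_mutex_t lock;   /* stand-in for ailp->xa_lock */
    lsn_t lsn;
};

static void item_committed(struct ail_item *it, lsn_t commit_lsn)
{
    pthread_mutex_lock(&it->lock);
    if (commit_lsn > it->lsn)
        it->lsn = commit_lsn;   /* reposition forward only */
    pthread_mutex_unlock(&it->lock);
}

int main(void)
{
    struct ail_item it = { PTHREAD_MUTEX_INITIALIZER, 100 };

    /* A later transaction may complete first with a higher lsn ... */
    item_committed(&it, 250);
    /* ... so the earlier one fails the test and leaves the lsn alone. */
    item_committed(&it, 180);
    printf("lsn = %lld\n", (long long)it.lsn);   /* prints 250 */
    return 0;
}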
+
+/* Clear all the per-AG busy list items listed in this transaction */
+static void
+xfs_trans_clear_busy_extents(
+ struct xfs_trans *tp)
+{
+ xfs_log_busy_chunk_t *lbcp;
+ xfs_log_busy_slot_t *lbsp;
+ int i;
+
+ for (lbcp = &tp->t_busy; lbcp != NULL; lbcp = lbcp->lbc_next) {
+ i = 0;
+ for (lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) {
+ if (XFS_LBC_ISFREE(lbcp, i))
+ continue;
+ xfs_alloc_clear_busy(tp, lbsp->lbc_ag, lbsp->lbc_idx);
+ }
+ }
+ xfs_trans_free_busy(tp);
+}
+
+/*
+ * This is typically called by the log manager (LM) when a transaction
+ * has been fully committed to disk. It needs to unpin the items which
+ * have been logged by the transaction and update their positions in
+ * the AIL if necessary.
+ *
+ * This also gets called when the transaction didn't get written out
+ * because of an I/O error; in that case abortflag is set and the
+ * items are marked XFS_LI_ABORTED.
+ */
+STATIC void
+xfs_trans_committed(
+ struct xfs_trans *tp,
+ int abortflag)
+{
+ xfs_log_item_desc_t *lidp;
+ xfs_log_item_chunk_t *licp;
+ xfs_log_item_chunk_t *next_licp;
+
+ /* Call the transaction's completion callback if there is one. */
+ if (tp->t_callback != NULL)
+ tp->t_callback(tp, tp->t_callarg);
+
+ for (lidp = xfs_trans_first_item(tp);
+ lidp != NULL;
+ lidp = xfs_trans_next_item(tp, lidp)) {
+ xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
+ }
+
+ /* free the item chunks, ignoring the embedded chunk */
+ for (licp = tp->t_items.lic_next; licp != NULL; licp = next_licp) {
+ next_licp = licp->lic_next;
+ kmem_free(licp);
+ }
+
+ xfs_trans_clear_busy_extents(tp);
+ xfs_trans_free(tp);
+}
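
The chunk-freeing loop above uses the usual save-next-before-free idiom and
deliberately skips the chunk embedded in the transaction itself, since that
one goes away with the transaction. A user-space sketch of the pattern
(invented names):

/* Illustrative sketch, not kernel code. */
#include <stdlib.h>

struct chunk { struct chunk *next; };

static void free_chain(struct chunk *embedded)
{
    struct chunk *c, *next;

    /* Skip the chunk embedded in the owning structure; it is freed
     * together with its owner, exactly like tp->t_items. */
    for (c = embedded->next; c != NULL; c = next) {
        next = c->next;     /* c is invalid after free() */
        free(c);
    }
    embedded->next = NULL;
}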
+
+/*
+ * Called from the trans_commit code when we notice that
+ * the filesystem is in the middle of a forced shutdown.
+ */
+STATIC void
+xfs_trans_uncommit(
+ struct xfs_trans *tp,
+ uint flags)
+{
+ xfs_log_item_desc_t *lidp;
+
+ for (lidp = xfs_trans_first_item(tp);
+ lidp != NULL;
+ lidp = xfs_trans_next_item(tp, lidp)) {
/*
- * It is indeed possible for the transaction to be
- * not dirty but the dqinfo portion to be. All that
- * means is that we have some (non-persistent) quota
- * reservations that need to be unreserved.
+ * Unpin only the items that were dirtied by this transaction.
*/
- xfs_trans_unreserve_and_mod_dquots(tp);
- if (tp->t_ticket) {
- commit_lsn = xfs_log_done(mp, tp->t_ticket,
- NULL, log_flags);
- if (commit_lsn == -1 && !shutdown)
- shutdown = XFS_ERROR(EIO);
- }
- current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
- xfs_trans_free_items(tp, shutdown? XFS_TRANS_ABORT : 0);
- xfs_trans_free_busy(tp);
- xfs_trans_free(tp);
- XFS_STATS_INC(xs_trans_empty);
- return (shutdown);
+ if (lidp->lid_flags & XFS_LID_DIRTY)
+ IOP_UNPIN_REMOVE(lidp->lid_item, tp);
}
- ASSERT(tp->t_ticket != NULL);
- /*
- * If we need to update the superblock, then do it now.
- */
- if (tp->t_flags & XFS_TRANS_SB_DIRTY)
- xfs_trans_apply_sb_deltas(tp);
- xfs_trans_apply_dquot_deltas(tp);
+ xfs_trans_unreserve_and_mod_sb(tp);
+ xfs_trans_unreserve_and_mod_dquots(tp);
+
+ xfs_trans_free_items(tp, flags);
+ xfs_trans_free_busy(tp);
+ xfs_trans_free(tp);
+}
+
+/*
+ * Format the transaction directly to the iclog. This isolates the physical
+ * transaction commit operation from the logical operation and hence allows
+ * other methods to be introduced without affecting the existing commit path.
+ */
+static int
+xfs_trans_commit_iclog(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_lsn_t *commit_lsn,
+ int flags)
+{
+ int shutdown;
+ int error;
+ int log_flags = 0;
+ struct xlog_in_core *commit_iclog;
+#define XFS_TRANS_LOGVEC_COUNT 16
+ struct xfs_log_iovec log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
+ struct xfs_log_iovec *log_vector;
+ uint nvec;
+
/*
* Ask each log item how many log_vector entries it will
@@ -861,8 +1047,7 @@ shut_us_down:
*/
nvec = xfs_trans_count_vecs(tp);
if (nvec == 0) {
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- goto shut_us_down;
+ return ENOMEM; /* triggers a shutdown! */
} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
log_vector = log_vector_fast;
} else {
@@ -877,6 +1062,9 @@ shut_us_down:
*/
xfs_trans_fill_vecs(tp, log_vector);
+ if (flags & XFS_TRANS_RELEASE_LOG_RES)
+ log_flags = XFS_LOG_REL_PERM_RESERV;
+
error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));
/*
@@ -884,18 +1072,17 @@ shut_us_down:
* at any time after this call. However, all the items associated
* with the transaction are still locked and pinned in memory.
*/
- commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
+ *commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
- tp->t_commit_lsn = commit_lsn;
- if (nvec > XFS_TRANS_LOGVEC_COUNT) {
+ tp->t_commit_lsn = *commit_lsn;
+ if (nvec > XFS_TRANS_LOGVEC_COUNT)
kmem_free(log_vector);
- }
/*
* If we got a log write error. Unpin the logitems that we
* had pinned, clean up, free trans structure, and return error.
*/
- if (error || commit_lsn == -1) {
+ if (error || *commit_lsn == -1) {
current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
return XFS_ERROR(EIO);
@@ -909,8 +1096,6 @@ shut_us_down:
*/
xfs_trans_unreserve_and_mod_sb(tp);
- sync = tp->t_flags & XFS_TRANS_SYNC;
-
/*
* Tell the LM to call the transaction completion routine
* when the log write with LSN commit_lsn completes (e.g.
@@ -953,7 +1138,7 @@ shut_us_down:
* the commit lsn of this transaction for dependency tracking
* purposes.
*/
- xfs_trans_unlock_items(tp, commit_lsn);
+ xfs_trans_unlock_items(tp, *commit_lsn);
/*
* If we detected a log error earlier, finish committing
@@ -973,156 +1158,114 @@ shut_us_down:
* and the items are released we can finally allow the iclog to
* go to disk.
*/
- error = xfs_log_release_iclog(mp, commit_iclog);
-
- /*
- * If the transaction needs to be synchronous, then force the
- * log out now and wait for it.
- */
- if (sync) {
- if (!error) {
- error = _xfs_log_force_lsn(mp, commit_lsn,
- XFS_LOG_SYNC, log_flushed);
- }
- XFS_STATS_INC(xs_trans_sync);
- } else {
- XFS_STATS_INC(xs_trans_async);
- }
-
- return (error);
+ return xfs_log_release_iclog(mp, commit_iclog);
}
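
xfs_trans_commit_iclog() keeps a small on-stack array of log iovecs and only
falls back to a heap allocation when the transaction needs more than
XFS_TRANS_LOGVEC_COUNT entries. The general fast-path/slow-path shape,
sketched in plain user-space C with calloc standing in for kmem_alloc
(illustrative only):

/* Illustrative sketch, not kernel code. */
#include <stdlib.h>
#include <string.h>

#define FAST_COUNT 16

struct vec { void *addr; size_t len; };

static int process(size_t nvec)
{
    struct vec fast[FAST_COUNT];    /* ~ log_vector_fast */
    struct vec *v = fast;

    if (nvec > FAST_COUNT) {
        v = calloc(nvec, sizeof(*v));   /* slow path, heap allocation */
        if (!v)
            return -1;
    }

    /* ... fill and submit v[0..nvec-1]; memset is a placeholder ... */
    memset(v, 0, nvec * sizeof(*v));

    if (v != fast)          /* mirrors the nvec > FAST_COUNT check */
        free(v);
    return 0;
}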
/*
- * Total up the number of log iovecs needed to commit this
- * transaction. The transaction itself needs one for the
- * transaction header. Ask each dirty item in turn how many
- * it needs to get the total.
+ * xfs_trans_commit
+ *
+ * Commit the given transaction to the log, either synchronously or
+ * asynchronously.
+ *
+ * The XFS disk error handling mechanism is not based on a typical
+ * transaction abort mechanism. Logically, after the filesystem gets
+ * marked 'SHUTDOWN', we can't let any new transactions be durable -
+ * i.e. committed to disk - because some metadata might be
+ * inconsistent. In such cases, this returns an error, and the
+ * caller may assume that all locked objects joined to the transaction
+ * have already been unlocked as if the commit had succeeded.
+ * Do not reference the transaction structure after this call.
*/
-STATIC uint
-xfs_trans_count_vecs(
- xfs_trans_t *tp)
+int
+_xfs_trans_commit(
+ struct xfs_trans *tp,
+ uint flags,
+ int *log_flushed)
{
- int nvecs;
- xfs_log_item_desc_t *lidp;
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_lsn_t commit_lsn = -1;
+ int error = 0;
+ int log_flags = 0;
+ int sync = tp->t_flags & XFS_TRANS_SYNC;
- nvecs = 1;
- lidp = xfs_trans_first_item(tp);
- ASSERT(lidp != NULL);
-
- /* In the non-debug case we need to start bailing out if we
- * didn't find a log_item here, return zero and let trans_commit
- * deal with it.
+ /*
+ * Determine whether this commit is releasing a permanent
+ * log reservation or not.
*/
- if (lidp == NULL)
- return 0;
-
- while (lidp != NULL) {
- /*
- * Skip items which aren't dirty in this transaction.
- */
- if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
- lidp = xfs_trans_next_item(tp, lidp);
- continue;
- }
- lidp->lid_size = IOP_SIZE(lidp->lid_item);
- nvecs += lidp->lid_size;
- lidp = xfs_trans_next_item(tp, lidp);
+ if (flags & XFS_TRANS_RELEASE_LOG_RES) {
+ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+ log_flags = XFS_LOG_REL_PERM_RESERV;
}
- return nvecs;
-}
-
-/*
- * Called from the trans_commit code when we notice that
- * the filesystem is in the middle of a forced shutdown.
- */
-STATIC void
-xfs_trans_uncommit(
- xfs_trans_t *tp,
- uint flags)
-{
- xfs_log_item_desc_t *lidp;
+ /*
+ * If there is nothing to be logged by the transaction,
+ * then unlock all of the items associated with the
+ * transaction and free the transaction structure.
+ * Also make sure to return any reserved blocks to
+ * the free pool.
+ */
+ if (!(tp->t_flags & XFS_TRANS_DIRTY))
+ goto out_unreserve;
- for (lidp = xfs_trans_first_item(tp);
- lidp != NULL;
- lidp = xfs_trans_next_item(tp, lidp)) {
- /*
- * Unpin all but those that aren't dirty.
- */
- if (lidp->lid_flags & XFS_LID_DIRTY)
- IOP_UNPIN_REMOVE(lidp->lid_item, tp);
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ goto out_unreserve;
}
- xfs_trans_unreserve_and_mod_sb(tp);
- xfs_trans_unreserve_and_mod_dquots(tp);
+ ASSERT(tp->t_ticket != NULL);
- xfs_trans_free_items(tp, flags);
- xfs_trans_free_busy(tp);
- xfs_trans_free(tp);
-}
+ /*
+ * If we need to update the superblock, then do it now.
+ */
+ if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+ xfs_trans_apply_sb_deltas(tp);
+ xfs_trans_apply_dquot_deltas(tp);
-/*
- * Fill in the vector with pointers to data to be logged
- * by this transaction. The transaction header takes
- * the first vector, and then each dirty item takes the
- * number of vectors it indicated it needed in xfs_trans_count_vecs().
- *
- * As each item fills in the entries it needs, also pin the item
- * so that it cannot be flushed out until the log write completes.
- */
-STATIC void
-xfs_trans_fill_vecs(
- xfs_trans_t *tp,
- xfs_log_iovec_t *log_vector)
-{
- xfs_log_item_desc_t *lidp;
- xfs_log_iovec_t *vecp;
- uint nitems;
+ error = xfs_trans_commit_iclog(mp, tp, &commit_lsn, flags);
+ if (error == ENOMEM) {
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
+ error = XFS_ERROR(EIO);
+ goto out_unreserve;
+ }
/*
- * Skip over the entry for the transaction header, we'll
- * fill that in at the end.
+ * If the transaction needs to be synchronous, then force the
+ * log out now and wait for it.
*/
- vecp = log_vector + 1; /* pointer arithmetic */
-
- nitems = 0;
- lidp = xfs_trans_first_item(tp);
- ASSERT(lidp != NULL);
- while (lidp != NULL) {
- /*
- * Skip items which aren't dirty in this transaction.
- */
- if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
- lidp = xfs_trans_next_item(tp, lidp);
- continue;
- }
- /*
- * The item may be marked dirty but not log anything.
- * This can be used to get called when a transaction
- * is committed.
- */
- if (lidp->lid_size) {
- nitems++;
+ if (sync) {
+ if (!error) {
+ error = _xfs_log_force_lsn(mp, commit_lsn,
+ XFS_LOG_SYNC, log_flushed);
}
- IOP_FORMAT(lidp->lid_item, vecp);
- vecp += lidp->lid_size; /* pointer arithmetic */
- IOP_PIN(lidp->lid_item);
- lidp = xfs_trans_next_item(tp, lidp);
+ XFS_STATS_INC(xs_trans_sync);
+ } else {
+ XFS_STATS_INC(xs_trans_async);
}
+ return error;
+
+out_unreserve:
+ xfs_trans_unreserve_and_mod_sb(tp);
+
/*
- * Now that we've counted the number of items in this
- * transaction, fill in the transaction header.
+ * It is indeed possible for the transaction itself to be clean while
+ * its dqinfo portion is dirty. All that means is that we have some
+ * (non-persistent) quota reservations that need to be unreserved.
*/
- tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
- tp->t_header.th_type = tp->t_type;
- tp->t_header.th_num_items = nitems;
- log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
- log_vector->i_len = sizeof(xfs_trans_header_t);
- log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
-}
+ xfs_trans_unreserve_and_mod_dquots(tp);
+ if (tp->t_ticket) {
+ commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+ if (commit_lsn == -1 && !error)
+ error = XFS_ERROR(EIO);
+ }
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+ xfs_trans_free_items(tp, error ? XFS_TRANS_ABORT : 0);
+ xfs_trans_free_busy(tp);
+ xfs_trans_free(tp);
+ XFS_STATS_INC(xs_trans_empty);
+ return error;
+}
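
The rewritten commit path funnels both the nothing-dirty case and the
forced-shutdown case through a single out_unreserve label instead of the old
shut_us_down goto. A condensed sketch of that control flow (types and field
names invented for illustration):

/* Illustrative sketch, not kernel code. */
#include <errno.h>

struct txn { int dirty; int shut_down; };

static int commit(struct txn *tp)
{
    int error = 0;

    if (!tp->dirty)
        goto out_unreserve;     /* nothing logged: just unwind */

    if (tp->shut_down) {
        error = EIO;            /* fs shut down: unwind with an error */
        goto out_unreserve;
    }

    /* ... write to the log, force it out if synchronous ... */
    return error;

out_unreserve:
    /* give back reservations and free the transaction: one exit path
     * shared by the clean and the shutdown cases */
    return error;
}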
/*
* Unlock all of the transaction's items and free the transaction.
@@ -1200,20 +1343,6 @@ xfs_trans_cancel(
xfs_trans_free(tp);
}
-
-/*
- * Free the transaction structure. If there is more clean up
- * to do when the structure is freed, add it here.
- */
-STATIC void
-xfs_trans_free(
- xfs_trans_t *tp)
-{
- atomic_dec(&tp->t_mountp->m_active_trans);
- xfs_trans_free_dqinfo(tp);
- kmem_zone_free(xfs_trans_zone, tp);
-}
-
/*
* Roll from one trans in the sequence of PERMANENT transactions to
* the next: permanent transactions are only flushed out when
@@ -1283,174 +1412,3 @@ xfs_trans_roll(
xfs_trans_ihold(trans, dp);
return 0;
}
-
-/*
- * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
- *
- * This is typically called by the LM when a transaction has been fully
- * committed to disk. It needs to unpin the items which have
- * been logged by the transaction and update their positions
- * in the AIL if necessary.
- * This also gets called when the transactions didn't get written out
- * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
- *
- * Call xfs_trans_chunk_committed() to process the items in
- * each chunk.
- */
-STATIC void
-xfs_trans_committed(
- xfs_trans_t *tp,
- int abortflag)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_chunk_t *next_licp;
- xfs_log_busy_chunk_t *lbcp;
- xfs_log_busy_slot_t *lbsp;
- int i;
-
- /*
- * Call the transaction's completion callback if there
- * is one.
- */
- if (tp->t_callback != NULL) {
- tp->t_callback(tp, tp->t_callarg);
- }
-
- /*
- * Special case the chunk embedded in the transaction.
- */
- licp = &(tp->t_items);
- if (!(xfs_lic_are_all_free(licp))) {
- xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
- }
-
- /*
- * Process the items in each chunk in turn.
- */
- licp = licp->lic_next;
- while (licp != NULL) {
- ASSERT(!xfs_lic_are_all_free(licp));
- xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
- next_licp = licp->lic_next;
- kmem_free(licp);
- licp = next_licp;
- }
-
- /*
- * Clear all the per-AG busy list items listed in this transaction
- */
- lbcp = &tp->t_busy;
- while (lbcp != NULL) {
- for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) {
- if (!XFS_LBC_ISFREE(lbcp, i)) {
- xfs_alloc_clear_busy(tp, lbsp->lbc_ag,
- lbsp->lbc_idx);
- }
- }
- lbcp = lbcp->lbc_next;
- }
- xfs_trans_free_busy(tp);
-
- /*
- * That's it for the transaction structure. Free it.
- */
- xfs_trans_free(tp);
-}
-
-/*
- * This is called to perform the commit processing for each
- * item described by the given chunk.
- *
- * The commit processing consists of unlocking items which were
- * held locked with the SYNC_UNLOCK attribute, calling the committed
- * routine of each logged item, updating the item's position in the AIL
- * if necessary, and unpinning each item. If the committed routine
- * returns -1, then do nothing further with the item because it
- * may have been freed.
- *
- * Since items are unlocked when they are copied to the incore
- * log, it is possible for two transactions to be completing
- * and manipulating the same item simultaneously. The AIL lock
- * will protect the lsn field of each item. The value of this
- * field can never go backwards.
- *
- * We unpin the items after repositioning them in the AIL, because
- * otherwise they could be immediately flushed and we'd have to race
- * with the flusher trying to pull the item from the AIL as we add it.
- */
-STATIC void
-xfs_trans_chunk_committed(
- xfs_log_item_chunk_t *licp,
- xfs_lsn_t lsn,
- int aborted)
-{
- xfs_log_item_desc_t *lidp;
- xfs_log_item_t *lip;
- xfs_lsn_t item_lsn;
- int i;
-
- lidp = licp->lic_descs;
- for (i = 0; i < licp->lic_unused; i++, lidp++) {
- struct xfs_ail *ailp;
-
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lip = lidp->lid_item;
- if (aborted)
- lip->li_flags |= XFS_LI_ABORTED;
-
- /*
- * Send in the ABORTED flag to the COMMITTED routine
- * so that it knows whether the transaction was aborted
- * or not.
- */
- item_lsn = IOP_COMMITTED(lip, lsn);
-
- /*
- * If the committed routine returns -1, make
- * no more references to the item.
- */
- if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
- continue;
- }
-
- /*
- * If the returned lsn is greater than what it
- * contained before, update the location of the
- * item in the AIL. If it is not, then do nothing.
- * Items can never move backwards in the AIL.
- *
- * While the new lsn should usually be greater, it
- * is possible that a later transaction completing
- * simultaneously with an earlier one using the
- * same item could complete first with a higher lsn.
- * This would cause the earlier transaction to fail
- * the test below.
- */
- ailp = lip->li_ailp;
- spin_lock(&ailp->xa_lock);
- if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
- /*
- * This will set the item's lsn to item_lsn
- * and update the position of the item in
- * the AIL.
- *
- * xfs_trans_ail_update() drops the AIL lock.
- */
- xfs_trans_ail_update(ailp, lip, item_lsn);
- } else {
- spin_unlock(&ailp->xa_lock);
- }
-
- /*
- * Now that we've repositioned the item in the AIL,
- * unpin it so it can be flushed. Pass information
- * about buffer stale state down from the log item
- * flags, if anyone else stales the buffer we do not
- * want to pay any attention to it.
- */
- IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
- }
-}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 79c8bab..c62beee 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -49,6 +49,15 @@ typedef struct xfs_trans_header {
#define XFS_LI_DQUOT 0x123d
#define XFS_LI_QUOTAOFF 0x123e
+#define XFS_LI_TYPE_DESC \
+ { XFS_LI_EFI, "XFS_LI_EFI" }, \
+ { XFS_LI_EFD, "XFS_LI_EFD" }, \
+ { XFS_LI_IUNLINK, "XFS_LI_IUNLINK" }, \
+ { XFS_LI_INODE, "XFS_LI_INODE" }, \
+ { XFS_LI_BUF, "XFS_LI_BUF" }, \
+ { XFS_LI_DQUOT, "XFS_LI_DQUOT" }, \
+ { XFS_LI_QUOTAOFF, "XFS_LI_QUOTAOFF" }
+
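Tables of { value, "name" } pairs like XFS_LI_TYPE_DESC are conventionally
consumed by tracing helpers such as __print_symbolic() to render flag values
symbolically; whether that is the intended consumer here is not shown in this
hunk. A user-space lookup doing the equivalent job, using only the two values
visible above (the rest of the table is elided):

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

struct desc { unsigned int value; const char *name; };

#define LI_TYPE_DESC \
    { 0x123d, "XFS_LI_DQUOT" },    \
    { 0x123e, "XFS_LI_QUOTAOFF" }

static const char *li_type_name(unsigned int type)
{
    static const struct desc descs[] = { LI_TYPE_DESC };

    for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
        if (descs[i].value == type)
            return descs[i].name;
    return "?";
}

int main(void)
{
    printf("%s\n", li_type_name(0x123e));   /* XFS_LI_QUOTAOFF */
    return 0;
}
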
/*
* Transaction types. Used to distinguish types of buffers.
*/
@@ -159,7 +168,6 @@ typedef struct xfs_log_item_desc {
#define XFS_LID_DIRTY 0x1
#define XFS_LID_PINNED 0x2
-#define XFS_LID_BUF_STALE 0x8
/*
* This structure is used to maintain a chunk list of log_item_desc
@@ -833,7 +841,7 @@ typedef struct xfs_item_ops {
uint (*iop_size)(xfs_log_item_t *);
void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
void (*iop_pin)(xfs_log_item_t *);
- void (*iop_unpin)(xfs_log_item_t *, int);
+ void (*iop_unpin)(xfs_log_item_t *);
void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
uint (*iop_trylock)(xfs_log_item_t *);
void (*iop_unlock)(xfs_log_item_t *);
@@ -846,7 +854,7 @@ typedef struct xfs_item_ops {
#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
-#define IOP_UNPIN(ip, flags) (*(ip)->li_ops->iop_unpin)(ip, flags)
+#define IOP_UNPIN(ip) (*(ip)->li_ops->iop_unpin)(ip)
#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp)
#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip)
#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)
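
The iop_* function-pointer table together with the IOP_* wrapper macros is a
hand-rolled vtable; this hunk narrows iop_unpin to a single argument. A
condensed stand-alone model of the pattern, with names shortened for
illustration:

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

struct item;

struct item_ops {
    unsigned int (*iop_size)(struct item *);
    void         (*iop_pin)(struct item *);
    void         (*iop_unpin)(struct item *);  /* flags arg dropped, as above */
};

struct item {
    const struct item_ops *ops;
    int pinned;
};

/* Wrappers hide the indirection, just like IOP_PIN()/IOP_UNPIN(). */
#define ITEM_SIZE(ip)  ((ip)->ops->iop_size(ip))
#define ITEM_PIN(ip)   ((ip)->ops->iop_pin(ip))
#define ITEM_UNPIN(ip) ((ip)->ops->iop_unpin(ip))

static unsigned int buf_size(struct item *ip) { (void)ip; return 2; }
static void buf_pin(struct item *ip)          { ip->pinned++; }
static void buf_unpin(struct item *ip)        { ip->pinned--; }

static const struct item_ops buf_ops = { buf_size, buf_pin, buf_unpin };

int main(void)
{
    struct item it = { &buf_ops, 0 };

    ITEM_PIN(&it);
    printf("vecs=%u pinned=%d\n", ITEM_SIZE(&it), it.pinned);
    ITEM_UNPIN(&it);
    return 0;
}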
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index fb586360..9cd8090 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -40,11 +40,51 @@
#include "xfs_rw.h"
#include "xfs_trace.h"
+/*
+ * Check to see if a buffer matching the given parameters is already
+ * a part of the given transaction.
+ */
+STATIC struct xfs_buf *
+xfs_trans_buf_item_match(
+ struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ int len)
+{
+ xfs_log_item_chunk_t *licp;
+ xfs_log_item_desc_t *lidp;
+ xfs_buf_log_item_t *blip;
+ int i;
-STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
- xfs_daddr_t, int);
-STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
- xfs_daddr_t, int);
+ len = BBTOB(len);
+ for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
+ if (xfs_lic_are_all_free(licp)) {
+ ASSERT(licp == &tp->t_items);
+ ASSERT(licp->lic_next == NULL);
+ return NULL;
+ }
+
+ for (i = 0; i < licp->lic_unused; i++) {
+ /*
+ * Skip unoccupied slots.
+ */
+ if (xfs_lic_isfree(licp, i))
+ continue;
+
+ lidp = xfs_lic_slot(licp, i);
+ blip = (xfs_buf_log_item_t *)lidp->lid_item;
+ if (blip->bli_item.li_type != XFS_LI_BUF)
+ continue;
+
+ if (XFS_BUF_TARGET(blip->bli_buf) == target &&
+ XFS_BUF_ADDR(blip->bli_buf) == blkno &&
+ XFS_BUF_COUNT(blip->bli_buf) == len)
+ return blip->bli_buf;
+ }
+ }
+
+ return NULL;
+}
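
The merged matcher above is a straight linear search keyed on (target, block
number, byte length) across every chunk, replacing the old fast/thorough pair
of functions. A user-space analogue of the scan, with invented types:

/* Illustrative sketch, not kernel code. */
#include <stddef.h>

struct buf { int target; long blkno; int len; };

struct chunk {
    struct chunk *next;
    int nused;
    struct buf *slots[4];   /* NULL means a free slot */
};

static struct buf *match(struct chunk *head, int target, long blkno, int len)
{
    for (struct chunk *c = head; c != NULL; c = c->next) {
        for (int i = 0; i < c->nused; i++) {
            struct buf *bp = c->slots[i];

            if (!bp)
                continue;   /* skip unoccupied slots */
            if (bp->target == target && bp->blkno == blkno &&
                bp->len == len)
                return bp;  /* already part of this transaction */
        }
    }
    return NULL;
}

Folding the two old functions into one trades the embedded-chunk fast path
for simpler code; the real version additionally bails out early when it hits
an all-free chunk.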
/*
* Add the locked buffer to the transaction.
@@ -112,14 +152,6 @@ xfs_trans_bjoin(
* within the transaction, just increment its lock recursion count
* and return a pointer to it.
*
- * Use the fast path function xfs_trans_buf_item_match() or the buffer
- * cache routine incore_match() to find the buffer
- * if it is already owned by this transaction.
- *
- * If we don't already own the buffer, use get_buf() to get it.
- * If it doesn't yet have an associated xfs_buf_log_item structure,
- * then allocate one and add the item to this transaction.
- *
* If the transaction pointer is NULL, make this just a normal
* get_buf() call.
*/
@@ -149,11 +181,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
* have it locked. In this case we just increment the lock
* recursion count and return the buffer to the caller.
*/
- if (tp->t_items.lic_next == NULL) {
- bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
- } else {
- bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
- }
+ bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
if (bp != NULL) {
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
@@ -259,14 +287,6 @@ int xfs_error_mod = 33;
* within the transaction and already read in, just increment its
* lock recursion count and return a pointer to it.
*
- * Use the fast path function xfs_trans_buf_item_match() or the buffer
- * cache routine incore_match() to find the buffer
- * if it is already owned by this transaction.
- *
- * If we don't already own the buffer, use read_buf() to get it.
- * If it doesn't yet have an associated xfs_buf_log_item structure,
- * then allocate one and add the item to this transaction.
- *
* If the transaction pointer is NULL, make this just a normal
* read_buf() call.
*/
@@ -328,11 +348,7 @@ xfs_trans_read_buf(
* If the buffer is not yet read in, then we read it in, increment
* the lock recursion count, and return it to the caller.
*/
- if (tp->t_items.lic_next == NULL) {
- bp = xfs_trans_buf_item_match(tp, target, blkno, len);
- } else {
- bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
- }
+ bp = xfs_trans_buf_item_match(tp, target, blkno, len);
if (bp != NULL) {
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
@@ -696,7 +712,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
- lidp->lid_flags &= ~XFS_LID_BUF_STALE;
bip->bli_flags |= XFS_BLI_LOGGED;
xfs_buf_item_log(bip, first, last);
}
@@ -782,7 +797,7 @@ xfs_trans_binval(
bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
memset((char *)(bip->bli_format.blf_data_map), 0,
(bip->bli_format.blf_map_size * sizeof(uint)));
- lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
+ lidp->lid_flags |= XFS_LID_DIRTY;
tp->t_flags |= XFS_TRANS_DIRTY;
}
@@ -902,111 +917,3 @@ xfs_trans_dquot_buf(
bip->bli_format.blf_flags |= type;
}
-
-/*
- * Check to see if a buffer matching the given parameters is already
- * a part of the given transaction. Only check the first, embedded
- * chunk, since we don't want to spend all day scanning large transactions.
- */
-STATIC xfs_buf_t *
-xfs_trans_buf_item_match(
- xfs_trans_t *tp,
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- int len)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_desc_t *lidp;
- xfs_buf_log_item_t *blip;
- xfs_buf_t *bp;
- int i;
-
- bp = NULL;
- len = BBTOB(len);
- licp = &tp->t_items;
- if (!xfs_lic_are_all_free(licp)) {
- for (i = 0; i < licp->lic_unused; i++) {
- /*
- * Skip unoccupied slots.
- */
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lidp = xfs_lic_slot(licp, i);
- blip = (xfs_buf_log_item_t *)lidp->lid_item;
- if (blip->bli_item.li_type != XFS_LI_BUF) {
- continue;
- }
-
- bp = blip->bli_buf;
- if ((XFS_BUF_TARGET(bp) == target) &&
- (XFS_BUF_ADDR(bp) == blkno) &&
- (XFS_BUF_COUNT(bp) == len)) {
- /*
- * We found it. Break out and
- * return the pointer to the buffer.
- */
- break;
- } else {
- bp = NULL;
- }
- }
- }
- return bp;
-}
-
-/*
- * Check to see if a buffer matching the given parameters is already
- * a part of the given transaction. Check all the chunks, we
- * want to be thorough.
- */
-STATIC xfs_buf_t *
-xfs_trans_buf_item_match_all(
- xfs_trans_t *tp,
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- int len)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_desc_t *lidp;
- xfs_buf_log_item_t *blip;
- xfs_buf_t *bp;
- int i;
-
- bp = NULL;
- len = BBTOB(len);
- for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
- if (xfs_lic_are_all_free(licp)) {
- ASSERT(licp == &tp->t_items);
- ASSERT(licp->lic_next == NULL);
- return NULL;
- }
- for (i = 0; i < licp->lic_unused; i++) {
- /*
- * Skip unoccupied slots.
- */
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lidp = xfs_lic_slot(licp, i);
- blip = (xfs_buf_log_item_t *)lidp->lid_item;
- if (blip->bli_item.li_type != XFS_LI_BUF) {
- continue;
- }
-
- bp = blip->bli_buf;
- if ((XFS_BUF_TARGET(bp) == target) &&
- (XFS_BUF_ADDR(bp) == blkno) &&
- (XFS_BUF_COUNT(bp) == len)) {
- /*
- * We found it. Break out and
- * return the pointer to the buffer.
- */
- return bp;
- }
- }
- }
- return NULL;
-}